if (coord_components > 0 &&
(has_lod || shadow_c.file != BAD_FILE ||
(op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
- for (unsigned i = coord_components; i < 3; i++)
+ assert(coord_components <= 3);
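+      /* msg_end points at the first unused coordinate slot here, so the
+       * zero padding written below starts at offset 0 of msg_end.
+       */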
+ for (unsigned i = 0; i < 3 - coord_components; i++)
bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
msg_end = offset(msg_end, bld, 3 - coord_components);
}
void
-fs_visitor::allocate_registers(unsigned min_dispatch_width, bool allow_spilling)
+fs_visitor::allocate_registers(bool allow_spilling)
{
bool allocated;
fixup_3src_null_dest();
}
-
- /* We only allow spilling for the last schedule mode and only if the
- * allow_spilling parameter and dispatch width work out ok.
- */
bool can_spill = allow_spilling &&
- (i == ARRAY_SIZE(pre_modes) - 1) &&
- dispatch_width == min_dispatch_width;
+ (i == ARRAY_SIZE(pre_modes) - 1);
/* We should only spill registers on the last scheduling. */
assert(!spilled_any_registers);
}
if (!allocated) {
- if (!allow_spilling)
- fail("Failure to register allocate and spilling is not allowed.");
-
- /* We assume that any spilling is worse than just dropping back to
- * SIMD8. There's probably actually some intermediate point where
- * SIMD16 with a couple of spills is still better.
- */
- if (dispatch_width > min_dispatch_width) {
- fail("Failure to register allocate. Reduce number of "
- "live scalar values to avoid this.");
- }
-
- /* If we failed to allocate, we must have a reason */
- assert(failed);
+ fail("Failure to register allocate. Reduce number of "
+ "live scalar values to avoid this.");
} else if (spilled_any_registers) {
compiler->shader_perf_log(log_data,
"%s shader triggered register spilling. "
assign_vs_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, true);
+ allocate_registers(true /* allow_spilling */);
return !failed;
}
assign_tcs_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, true);
+ allocate_registers(true /* allow_spilling */);
return !failed;
}
assign_tes_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, true);
+ allocate_registers(true /* allow_spilling */);
return !failed;
}
assign_gs_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, true);
+ allocate_registers(true /* allow_spilling */);
return !failed;
}
}
}
+ if (nir->info.writes_memory)
+ wm_prog_data->has_side_effects = true;
+
emit_nir_code();
if (failed)
assign_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, allow_spilling);
+
+ allocate_registers(allow_spilling);
if (failed)
return false;
}
bool
-fs_visitor::run_cs(unsigned min_dispatch_width)
+fs_visitor::run_cs(bool allow_spilling)
{
assert(stage == MESA_SHADER_COMPUTE);
- assert(dispatch_width >= min_dispatch_width);
setup_cs_payload();
assign_curb_setup();
fixup_3src_null_dest();
- allocate_registers(min_dispatch_width, true);
+ allocate_registers(allow_spilling);
if (failed)
return false;
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL;
float throughput = 0;
+ bool has_spilled = false;
v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
&prog_data->base, shader, 8, shader_time_index8);
prog_data->reg_blocks_8 = brw_register_blocks(v8->grf_used);
const performance &perf = v8->performance_analysis.require();
throughput = MAX2(throughput, perf.throughput);
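+      /* If this SIMD8 variant spilled, wider variants are assumed to spill
+       * even more, so note it (the SIMD16/32 compiles below are skipped)
+       * and disallow spilling in any further compile.
+       */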
+ has_spilled = v8->spilled_any_registers;
+ allow_spilling = false;
}
/* Limit dispatch width to simd8 with dual source blending on gen8.
- * See: https://gitlab.freedesktop.org/mesa/mesa/issues/1917
+ * See: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1917
*/
if (devinfo->gen == 8 && prog_data->dual_src_blend &&
!(INTEL_DEBUG & DEBUG_NO8)) {
"using SIMD8 when dual src blending.\n");
}
- if (v8->max_dispatch_width >= 16 &&
+ if (!has_spilled &&
+ v8->max_dispatch_width >= 16 &&
likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
/* Try a SIMD16 compile */
v16 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
prog_data->reg_blocks_16 = brw_register_blocks(v16->grf_used);
const performance &perf = v16->performance_analysis.require();
throughput = MAX2(throughput, perf.throughput);
+ has_spilled = v16->spilled_any_registers;
+ allow_spilling = false;
}
}
/* Currently, the compiler only supports SIMD32 on SNB+ */
- if (v8->max_dispatch_width >= 32 && !use_rep_send &&
+ if (!has_spilled &&
+ v8->max_dispatch_width >= 32 && !use_rep_send &&
devinfo->gen >= 6 && simd16_cfg &&
!(INTEL_DEBUG & DEBUG_NO32)) {
/* Try a SIMD32 compile */
prog_data->nr_params);
}
+static bool
+filter_simd(const nir_instr *instr, const void *_options)
+{
+ if (instr->type != nir_instr_type_intrinsic)
+ return false;
+
+ switch (nir_instr_as_intrinsic(instr)->intrinsic) {
+ case nir_intrinsic_load_simd_width_intel:
+ case nir_intrinsic_load_subgroup_id:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static nir_ssa_def *
+lower_simd(nir_builder *b, nir_instr *instr, void *options)
+{
+ uintptr_t simd_width = (uintptr_t)options;
+
+ switch (nir_instr_as_intrinsic(instr)->intrinsic) {
+ case nir_intrinsic_load_simd_width_intel:
+ return nir_imm_int(b, simd_width);
+
+ case nir_intrinsic_load_subgroup_id:
+ /* If the whole workgroup fits in one thread, we can lower subgroup_id
+ * to a constant zero.
+ */
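+      /* For example, an 8x4x1 workgroup (32 invocations) compiled at
+       * SIMD32 always executes as a single subgroup.
+       */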
+ if (!b->shader->info.cs.local_size_variable) {
+ unsigned local_workgroup_size = b->shader->info.cs.local_size[0] *
+ b->shader->info.cs.local_size[1] *
+ b->shader->info.cs.local_size[2];
+ if (local_workgroup_size <= simd_width)
+ return nir_imm_int(b, 0);
+ }
+ return NULL;
+
+ default:
+ return NULL;
+ }
+}
+
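+/* Replace the SIMD-dependent intrinsics matched by filter_simd() with
+ * constants for the given dispatch width: load_simd_width_intel becomes an
+ * immediate, and load_subgroup_id becomes zero when the whole workgroup
+ * fits in a single thread.
+ */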
+static void
+brw_nir_lower_simd(nir_shader *nir, unsigned dispatch_width)
+{
+ nir_shader_lower_instructions(nir, filter_simd, lower_simd,
+ (void *)(uintptr_t)dispatch_width);
+}
+
static nir_shader *
compile_cs_to_nir(const struct brw_compiler *compiler,
void *mem_ctx,
nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
brw_nir_apply_key(shader, compiler, &key->base, dispatch_width, true);
- NIR_PASS_V(shader, brw_nir_lower_cs_intrinsics, dispatch_width);
+ NIR_PASS_V(shader, brw_nir_lower_simd, dispatch_width);
/* Clean up after the local index and ID calculations. */
NIR_PASS_V(shader, nir_opt_constant_folding);
prog_data->base.total_shared = src_shader->info.cs.shared_size;
prog_data->slm_size = src_shader->num_shared;
- unsigned local_workgroup_size;
+   /* When the local workgroup size is variable, generate code for all the
+    * possible SIMD variants so the right one can be picked at dispatch time.
+    */
+ bool generate_all;
+
+ unsigned min_dispatch_width;
+ unsigned max_dispatch_width;
+
if (src_shader->info.cs.local_size_variable) {
- local_workgroup_size = src_shader->info.cs.max_variable_local_size;
+ generate_all = true;
+ min_dispatch_width = 8;
+ max_dispatch_width = 32;
} else {
+ generate_all = false;
prog_data->local_size[0] = src_shader->info.cs.local_size[0];
prog_data->local_size[1] = src_shader->info.cs.local_size[1];
prog_data->local_size[2] = src_shader->info.cs.local_size[2];
- local_workgroup_size = src_shader->info.cs.local_size[0] *
- src_shader->info.cs.local_size[1] * src_shader->info.cs.local_size[2];
- }
-
- /* Limit max_threads to 64 for the GPGPU_WALKER command */
- const uint32_t max_threads = MIN2(64, compiler->devinfo->max_cs_threads);
- unsigned min_dispatch_width =
- DIV_ROUND_UP(local_workgroup_size, max_threads);
- min_dispatch_width = MAX2(8, min_dispatch_width);
- min_dispatch_width = util_next_power_of_two(min_dispatch_width);
- assert(min_dispatch_width <= 32);
- unsigned max_dispatch_width = 32;
+ unsigned local_workgroup_size = prog_data->local_size[0] *
+ prog_data->local_size[1] *
+ prog_data->local_size[2];
- fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
- fs_visitor *v = NULL;
- const char *fail_msg = NULL;
+ /* Limit max_threads to 64 for the GPGPU_WALKER command */
+ const uint32_t max_threads = MIN2(64, compiler->devinfo->max_cs_threads);
+ min_dispatch_width = util_next_power_of_two(
+ MAX2(8, DIV_ROUND_UP(local_workgroup_size, max_threads)));
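+      /* For example (illustrative), a 1024-invocation workgroup limited to
+       * 64 threads needs DIV_ROUND_UP(1024, 64) = 16 invocations per
+       * thread, so the minimum dispatch width is SIMD16.
+       */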
+ assert(min_dispatch_width <= 32);
+ max_dispatch_width = 32;
+ }
if ((int)key->base.subgroup_size_type >= (int)BRW_SUBGROUP_SIZE_REQUIRE_8) {
/* These enum values are expressly chosen to be equal to the subgroup
required_dispatch_width == 32);
if (required_dispatch_width < min_dispatch_width ||
required_dispatch_width > max_dispatch_width) {
- fail_msg = "Cannot satisfy explicit subgroup size";
- } else {
- min_dispatch_width = max_dispatch_width = required_dispatch_width;
+ if (error_str) {
+ *error_str = ralloc_strdup(mem_ctx,
+ "Cannot satisfy explicit subgroup size");
+ }
+ return NULL;
}
+ min_dispatch_width = max_dispatch_width = required_dispatch_width;
}
- /* Now the main event: Visit the shader IR and generate our CS IR for it.
- */
- if (!fail_msg && min_dispatch_width <= 8 && max_dispatch_width >= 8) {
+ assert(min_dispatch_width <= max_dispatch_width);
+
+ fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
+ fs_visitor *v = NULL;
+
+ if (likely(!(INTEL_DEBUG & DEBUG_NO8)) &&
+ min_dispatch_width <= 8 && max_dispatch_width >= 8) {
nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 8);
v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
&prog_data->base,
nir8, 8, shader_time_index);
- if (!v8->run_cs(min_dispatch_width)) {
- fail_msg = v8->fail_msg;
- } else {
- /* We should always be able to do SIMD32 for compute shaders */
- assert(v8->max_dispatch_width >= 32);
-
- v = v8;
- prog_data->simd_size = 8;
- cs_fill_push_const_info(compiler->devinfo, prog_data);
+ if (!v8->run_cs(true /* allow_spilling */)) {
+ if (error_str)
+ *error_str = ralloc_strdup(mem_ctx, v8->fail_msg);
+ delete v8;
+ return NULL;
}
+
+ /* We should always be able to do SIMD32 for compute shaders */
+ assert(v8->max_dispatch_width >= 32);
+
+ v = v8;
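+      /* Bit 0 of prog_mask/prog_spilled stands for the SIMD8 variant;
+       * bits 1 and 2 below are SIMD16 and SIMD32 respectively.
+       */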
+ prog_data->prog_mask |= 1 << 0;
+ if (v8->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 0;
+ cs_fill_push_const_info(compiler->devinfo, prog_data);
}
if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
- !fail_msg && min_dispatch_width <= 16 && max_dispatch_width >= 16) {
+ (generate_all || !prog_data->prog_spilled) &&
+ min_dispatch_width <= 16 && max_dispatch_width >= 16) {
/* Try a SIMD16 compile */
nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 16);
if (v8)
v16->import_uniforms(v8);
- if (!v16->run_cs(min_dispatch_width)) {
+ const bool allow_spilling = generate_all || v == NULL;
+ if (!v16->run_cs(allow_spilling)) {
compiler->shader_perf_log(log_data,
"SIMD16 shader failed to compile: %s",
v16->fail_msg);
if (!v) {
- fail_msg =
- "Couldn't generate SIMD16 program and not "
- "enough threads for SIMD8";
+ assert(v8 == NULL);
+ if (error_str) {
+ *error_str = ralloc_asprintf(
+ mem_ctx, "Not enough threads for SIMD8 and "
+ "couldn't generate SIMD16: %s", v16->fail_msg);
+ }
+ delete v16;
+ return NULL;
}
} else {
/* We should always be able to do SIMD32 for compute shaders */
assert(v16->max_dispatch_width >= 32);
v = v16;
- prog_data->simd_size = 16;
+ prog_data->prog_mask |= 1 << 1;
+ if (v16->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 1;
cs_fill_push_const_info(compiler->devinfo, prog_data);
}
}
- /* We should always be able to do SIMD32 for compute shaders */
- assert(!v16 || v16->max_dispatch_width >= 32);
-
- if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32)) &&
- max_dispatch_width >= 32) {
+   /* SIMD32 is only enabled when it is actually needed, unless forced.
+ *
+ * TODO: Use performance_analysis and drop this boolean.
+ */
+ const bool needs_32 = min_dispatch_width > 16 ||
+ (INTEL_DEBUG & DEBUG_DO32) ||
+ generate_all;
+
+ if (likely(!(INTEL_DEBUG & DEBUG_NO32)) &&
+ (generate_all || !prog_data->prog_spilled) &&
+ needs_32 &&
+ min_dispatch_width <= 32 && max_dispatch_width >= 32) {
/* Try a SIMD32 compile */
nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 32);
else if (v16)
v32->import_uniforms(v16);
- if (!v32->run_cs(min_dispatch_width)) {
+ const bool allow_spilling = generate_all || v == NULL;
+ if (!v32->run_cs(allow_spilling)) {
compiler->shader_perf_log(log_data,
"SIMD32 shader failed to compile: %s",
v32->fail_msg);
if (!v) {
- fail_msg =
- "Couldn't generate SIMD32 program and not "
- "enough threads for SIMD16";
+ assert(v8 == NULL);
+ assert(v16 == NULL);
+ if (error_str) {
+ *error_str = ralloc_asprintf(
+ mem_ctx, "Not enough threads for SIMD16 and "
+ "couldn't generate SIMD32: %s", v32->fail_msg);
+ }
+ delete v32;
+ return NULL;
}
} else {
v = v32;
- prog_data->simd_size = 32;
+ prog_data->prog_mask |= 1 << 2;
+ if (v32->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 2;
cs_fill_push_const_info(compiler->devinfo, prog_data);
}
}
- const unsigned *ret = NULL;
- if (unlikely(v == NULL)) {
- assert(fail_msg);
- if (error_str)
- *error_str = ralloc_strdup(mem_ctx, fail_msg);
- } else {
- fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
- v->runtime_check_aads_emit, MESA_SHADER_COMPUTE);
- if (INTEL_DEBUG & DEBUG_CS) {
- char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
- src_shader->info.label ?
- src_shader->info.label : "unnamed",
- src_shader->info.name);
- g.enable_debug(name);
+ if (unlikely(!v && (INTEL_DEBUG & (DEBUG_NO8 | DEBUG_NO16 | DEBUG_NO32)))) {
+ if (error_str) {
+ *error_str =
+ ralloc_strdup(mem_ctx,
+ "Cannot satisfy INTEL_DEBUG flags SIMD restrictions");
}
+ return NULL;
+ }
- g.generate_code(v->cfg, prog_data->simd_size, v->shader_stats,
- v->performance_analysis.require(), stats);
+ assert(v);
- ret = g.get_assembly();
+ const unsigned *ret = NULL;
+
+ fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
+ v->runtime_check_aads_emit, MESA_SHADER_COMPUTE);
+ if (INTEL_DEBUG & DEBUG_CS) {
+ char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
+ src_shader->info.label ?
+ src_shader->info.label : "unnamed",
+ src_shader->info.name);
+ g.enable_debug(name);
+ }
+
+ if (generate_all) {
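+      /* Each generated variant fills one entry of the stats array (when
+       * stats is non-NULL), so advance the pointer after every
+       * generate_code() call.
+       */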
+ if (prog_data->prog_mask & (1 << 0)) {
+ assert(v8);
+ prog_data->prog_offset[0] =
+ g.generate_code(v8->cfg, 8, v8->shader_stats,
+ v8->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+
+ if (prog_data->prog_mask & (1 << 1)) {
+ assert(v16);
+ prog_data->prog_offset[1] =
+ g.generate_code(v16->cfg, 16, v16->shader_stats,
+ v16->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+
+ if (prog_data->prog_mask & (1 << 2)) {
+ assert(v32);
+ prog_data->prog_offset[2] =
+ g.generate_code(v32->cfg, 32, v32->shader_stats,
+ v32->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+ } else {
+      /* Only one dispatch width will be valid, and its code will be at
+       * offset 0, which is already the default value of the prog_offset[]
+       * entries.
+ */
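+      /* dispatch_width / 16 maps 8, 16 and 32 to bits 0, 1 and 2. */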
+ prog_data->prog_mask = 1 << (v->dispatch_width / 16);
+ g.generate_code(v->cfg, v->dispatch_width, v->shader_stats,
+ v->performance_analysis.require(), stats);
}
+ ret = g.get_assembly();
+
delete v8;
delete v16;
delete v32;
return ret;
}
+unsigned
+brw_cs_simd_size_for_group_size(const struct gen_device_info *devinfo,
+ const struct brw_cs_prog_data *cs_prog_data,
+ unsigned group_size)
+{
+ const unsigned mask = cs_prog_data->prog_mask;
+ assert(mask != 0);
+
+ static const unsigned simd8 = 1 << 0;
+ static const unsigned simd16 = 1 << 1;
+ static const unsigned simd32 = 1 << 2;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_DO32) && (mask & simd32))
+ return 32;
+
+ /* Limit max_threads to 64 for the GPGPU_WALKER command */
+ const uint32_t max_threads = MIN2(64, devinfo->max_cs_threads);
+
+ if ((mask & simd8) && group_size <= 8 * max_threads) {
+      /* Prefer SIMD16 if it can be done without spilling.  Matches the
+       * logic in brw_compile_cs.
+ */
+ if ((mask & simd16) && (~cs_prog_data->prog_spilled & simd16))
+ return 16;
+ return 8;
+ }
+
+ if ((mask & simd16) && group_size <= 16 * max_threads)
+ return 16;
+
+ assert(mask & simd32);
+ assert(group_size <= 32 * max_threads);
+ return 32;
+}
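+
+/* Example use at dispatch time (illustrative sketch, not part of this
+ * patch; x, y, z and ksp_offset are placeholder names): pick the SIMD size
+ * for the actual group size, then index prog_offset[] with it to find the
+ * matching kernel.
+ *
+ *    unsigned group_size = x * y * z;
+ *    unsigned simd = brw_cs_simd_size_for_group_size(devinfo, prog_data,
+ *                                                    group_size);
+ *    uint32_t ksp_offset = prog_data->prog_offset[simd / 16];
+ */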
+
/**
* Test the dispatch mask packing assumptions of
* brw_stage_has_packed_dispatch(). Call this from e.g. the top of