* dependencies, and to avoid having to deal with aligning its regs to 2.
*/
const fs_builder ubld = bld.annotate("send dependency resolve")
- .half(0);
+ .quarter(0);
ubld.MOV(ubld.null_reg_f(), fs_reg(VGRF, grf, BRW_REGISTER_TYPE_F));
}
} else {
/* Platform doesn't have COMPR4. We have to fake it */
fs_reg mov_dst = retype(dst, inst->src[i].type);
- ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
+ ibld.quarter(0).MOV(mov_dst, quarter(inst->src[i], 0));
mov_dst.nr += 4;
- ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
+ ibld.quarter(1).MOV(mov_dst, quarter(inst->src[i], 1));
}
}
retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD),
};
ubld.LOAD_PAYLOAD(header, header_sources, 2, 0);
+
+ /* Gen12 will require additional fix-ups if we ever hit this path. */
+ assert(devinfo->gen < 12);
}
uint32_t g00_bits = 0;
static void
lower_fb_read_logical_send(const fs_builder &bld, fs_inst *inst)
{
+ const gen_device_info *devinfo = bld.shader->devinfo;
const fs_builder &ubld = bld.exec_all().group(8, 0);
const unsigned length = 2;
const fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, length);
retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD)
};
ubld.LOAD_PAYLOAD(header, header_sources, ARRAY_SIZE(header_sources), 0);
+
+ if (devinfo->gen >= 12) {
+ /* On Gen12 the Viewport and Render Target Array Index fields (AKA
+ * Poly 0 Info) are provided in r1.1 instead of r0.0, and the render
+ * target message header format was updated accordingly. However,
+ * the updated format only works for the lower 16 channels in a
+ * SIMD32 thread, since the higher 16 channels want the subspan data
+ * from r2 instead of r1, so we need to copy over the contents of
+ * r1.1 in order to fix things up.
+ */
+ ubld.group(1, 0).MOV(component(header, 9),
+ retype(brw_vec1_grf(1, 1), BRW_REGISTER_TYPE_UD));
+ }
}
inst->resize_sources(1);
if (coord_components > 0 &&
(has_lod || shadow_c.file != BAD_FILE ||
(op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
- for (unsigned i = coord_components; i < 3; i++)
+ assert(coord_components <= 3);
+ for (unsigned i = 0; i < 3 - coord_components; i++)
bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
msg_end = offset(msg_end, bld, 3 - coord_components);
}
void
-fs_visitor::allocate_registers(unsigned min_dispatch_width, bool allow_spilling)
+fs_visitor::allocate_registers(bool allow_spilling)
{
bool allocated;
fixup_3src_null_dest();
}
-
- /* We only allow spilling for the last schedule mode and only if the
- * allow_spilling parameter and dispatch width work out ok.
- */
bool can_spill = allow_spilling &&
- (i == ARRAY_SIZE(pre_modes) - 1) &&
- dispatch_width == min_dispatch_width;
+ (i == ARRAY_SIZE(pre_modes) - 1);
/* We should only spill registers on the last scheduling. */
assert(!spilled_any_registers);
}
if (!allocated) {
- if (!allow_spilling)
- fail("Failure to register allocate and spilling is not allowed.");
-
- /* We assume that any spilling is worse than just dropping back to
- * SIMD8. There's probably actually some intermediate point where
- * SIMD16 with a couple of spills is still better.
- */
- if (dispatch_width > min_dispatch_width) {
- fail("Failure to register allocate. Reduce number of "
- "live scalar values to avoid this.");
- }
-
- /* If we failed to allocate, we must have a reason */
- assert(failed);
+ fail("Failure to register allocate. Reduce number of "
+ "live scalar values to avoid this.");
} else if (spilled_any_registers) {
compiler->shader_perf_log(log_data,
"%s shader triggered register spilling. "
assign_vs_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, true);
+ allocate_registers(true /* allow_spilling */);
return !failed;
}
assign_tcs_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, true);
+ allocate_registers(true /* allow_spilling */);
return !failed;
}
assign_tes_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, true);
+ allocate_registers(true /* allow_spilling */);
return !failed;
}
assign_gs_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, true);
+ allocate_registers(true /* allow_spilling */);
return !failed;
}
}
}
+ if (nir->info.writes_memory)
+ wm_prog_data->has_side_effects = true;
+
emit_nir_code();
if (failed)
assign_urb_setup();
fixup_3src_null_dest();
- allocate_registers(8, allow_spilling);
+
+ allocate_registers(allow_spilling);
if (failed)
return false;
}
bool
-fs_visitor::run_cs(unsigned min_dispatch_width)
+fs_visitor::run_cs(bool allow_spilling)
{
assert(stage == MESA_SHADER_COMPUTE);
- assert(dispatch_width >= min_dispatch_width);
setup_cs_payload();
assign_curb_setup();
fixup_3src_null_dest();
- allocate_registers(min_dispatch_width, true);
+ allocate_registers(allow_spilling);
if (failed)
return false;
char **error_str)
{
const struct gen_device_info *devinfo = compiler->devinfo;
-
- unsigned max_subgroup_size = unlikely(INTEL_DEBUG & DEBUG_DO32) ? 32 : 16;
+ const unsigned max_subgroup_size = compiler->devinfo->gen >= 6 ? 32 : 16;
brw_nir_apply_key(shader, compiler, &key->base, max_subgroup_size, true);
brw_nir_lower_fs_inputs(shader, devinfo, key);
calculate_urb_setup(devinfo, key, prog_data, shader);
brw_compute_flat_inputs(prog_data, shader);
+ fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL;
- struct shader_stats v8_shader_stats, v16_shader_stats, v32_shader_stats;
+ float throughput = 0;
+ bool has_spilled = false;
- fs_visitor v8(compiler, log_data, mem_ctx, &key->base,
- &prog_data->base, shader, 8,
- shader_time_index8);
- if (!v8.run_fs(allow_spilling, false /* do_rep_send */)) {
+ v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base, shader, 8, shader_time_index8);
+ if (!v8->run_fs(allow_spilling, false /* do_rep_send */)) {
if (error_str)
- *error_str = ralloc_strdup(mem_ctx, v8.fail_msg);
+ *error_str = ralloc_strdup(mem_ctx, v8->fail_msg);
+ delete v8;
return NULL;
} else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
- simd8_cfg = v8.cfg;
- v8_shader_stats = v8.shader_stats;
- prog_data->base.dispatch_grf_start_reg = v8.payload.num_regs;
- prog_data->reg_blocks_8 = brw_register_blocks(v8.grf_used);
+ simd8_cfg = v8->cfg;
+ prog_data->base.dispatch_grf_start_reg = v8->payload.num_regs;
+ prog_data->reg_blocks_8 = brw_register_blocks(v8->grf_used);
+ const performance &perf = v8->performance_analysis.require();
+ throughput = MAX2(throughput, perf.throughput);
+ has_spilled = v8->spilled_any_registers;
+ allow_spilling = false;
}
/* Limit dispatch width to simd8 with dual source blending on gen8.
- * See: https://gitlab.freedesktop.org/mesa/mesa/issues/1917
+ * See: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1917
*/
if (devinfo->gen == 8 && prog_data->dual_src_blend &&
!(INTEL_DEBUG & DEBUG_NO8)) {
assert(!use_rep_send);
- v8.limit_dispatch_width(8, "gen8 workaround: "
- "using SIMD8 when dual src blending.\n");
+ v8->limit_dispatch_width(8, "gen8 workaround: "
+ "using SIMD8 when dual src blending.\n");
}
- if (v8.max_dispatch_width >= 16 &&
+ if (!has_spilled &&
+ v8->max_dispatch_width >= 16 &&
likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
/* Try a SIMD16 compile */
- fs_visitor v16(compiler, log_data, mem_ctx, &key->base,
- &prog_data->base, shader, 16,
- shader_time_index16);
- v16.import_uniforms(&v8);
- if (!v16.run_fs(allow_spilling, use_rep_send)) {
+ v16 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base, shader, 16, shader_time_index16);
+ v16->import_uniforms(v8);
+ if (!v16->run_fs(allow_spilling, use_rep_send)) {
compiler->shader_perf_log(log_data,
"SIMD16 shader failed to compile: %s",
- v16.fail_msg);
+ v16->fail_msg);
} else {
- simd16_cfg = v16.cfg;
- v16_shader_stats = v16.shader_stats;
- prog_data->dispatch_grf_start_reg_16 = v16.payload.num_regs;
- prog_data->reg_blocks_16 = brw_register_blocks(v16.grf_used);
+ simd16_cfg = v16->cfg;
+ prog_data->dispatch_grf_start_reg_16 = v16->payload.num_regs;
+ prog_data->reg_blocks_16 = brw_register_blocks(v16->grf_used);
+ const performance &perf = v16->performance_analysis.require();
+ throughput = MAX2(throughput, perf.throughput);
+ has_spilled = v16->spilled_any_registers;
+ allow_spilling = false;
}
}
/* Currently, the compiler only supports SIMD32 on SNB+ */
- if (v8.max_dispatch_width >= 32 && !use_rep_send &&
- compiler->devinfo->gen >= 6 &&
- unlikely(INTEL_DEBUG & DEBUG_DO32)) {
+ if (!has_spilled &&
+ v8->max_dispatch_width >= 32 && !use_rep_send &&
+ devinfo->gen >= 6 && simd16_cfg &&
+ !(INTEL_DEBUG & DEBUG_NO32)) {
/* Try a SIMD32 compile */
- fs_visitor v32(compiler, log_data, mem_ctx, &key->base,
- &prog_data->base, shader, 32,
- shader_time_index32);
- v32.import_uniforms(&v8);
- if (!v32.run_fs(allow_spilling, false)) {
+ v32 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base, shader, 32, shader_time_index32);
+ v32->import_uniforms(v8);
+ if (!v32->run_fs(allow_spilling, false)) {
compiler->shader_perf_log(log_data,
"SIMD32 shader failed to compile: %s",
- v32.fail_msg);
+ v32->fail_msg);
} else {
- simd32_cfg = v32.cfg;
- v32_shader_stats = v32.shader_stats;
- prog_data->dispatch_grf_start_reg_32 = v32.payload.num_regs;
- prog_data->reg_blocks_32 = brw_register_blocks(v32.grf_used);
+ const performance &perf = v32->performance_analysis.require();
+
+ if (!(INTEL_DEBUG & DEBUG_DO32) && throughput >= perf.throughput) {
+ compiler->shader_perf_log(log_data, "SIMD32 shader inefficient\n");
+ } else {
+ simd32_cfg = v32->cfg;
+ prog_data->dispatch_grf_start_reg_32 = v32->payload.num_regs;
+ prog_data->reg_blocks_32 = brw_register_blocks(v32->grf_used);
+ throughput = MAX2(throughput, perf.throughput);
+ }
}
}
if (prog_data->persample_dispatch) {
/* Starting with SandyBridge (where we first get MSAA), the different
* pixel dispatch combinations are grouped into classifications A
- * through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On all hardware
+ * through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On most hardware
* generations, the only configurations supporting persample dispatch
- * are are this in which only one dispatch width is enabled.
+ * are those in which only one dispatch width is enabled.
+ *
+ * The Gen12 hardware spec has a similar dispatch grouping table, but
+ * the following conflicting restriction applies (from the page on
+ * "Structure_3DSTATE_PS_BODY"), so we need to keep the SIMD16 shader:
+ *
+ * "SIMD32 may only be enabled if SIMD16 or (dual)SIMD8 is also
+ * enabled."
*/
if (simd32_cfg || simd16_cfg)
simd8_cfg = NULL;
- if (simd32_cfg)
+ if (simd32_cfg && devinfo->gen < 12)
simd16_cfg = NULL;
}
fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
- v8.runtime_check_aads_emit, MESA_SHADER_FRAGMENT);
+ v8->runtime_check_aads_emit, MESA_SHADER_FRAGMENT);
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
if (simd8_cfg) {
prog_data->dispatch_8 = true;
- g.generate_code(simd8_cfg, 8, v8_shader_stats, stats);
+ g.generate_code(simd8_cfg, 8, v8->shader_stats,
+ v8->performance_analysis.require(), stats);
stats = stats ? stats + 1 : NULL;
}
if (simd16_cfg) {
prog_data->dispatch_16 = true;
- prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16, v16_shader_stats, stats);
+ prog_data->prog_offset_16 = g.generate_code(
+ simd16_cfg, 16, v16->shader_stats,
+ v16->performance_analysis.require(), stats);
stats = stats ? stats + 1 : NULL;
}
if (simd32_cfg) {
prog_data->dispatch_32 = true;
- prog_data->prog_offset_32 = g.generate_code(simd32_cfg, 32, v32_shader_stats, stats);
+ prog_data->prog_offset_32 = g.generate_code(
+ simd32_cfg, 32, v32->shader_stats,
+ v32->performance_analysis.require(), stats);
stats = stats ? stats + 1 : NULL;
}
+ delete v8;
+ delete v16;
+ delete v32;
+
return g.get_assembly();
}
prog_data->nr_params);
}
+/* Instruction filter for brw_nir_lower_simd(): matches exactly the two
+ * intrinsics that lower_simd() knows how to rewrite — loads of the SIMD
+ * width and loads of the subgroup ID.  Everything else is left untouched.
+ */
+static bool
+filter_simd(const nir_instr *instr, const void *_options)
+{
+   /* Only intrinsic instructions can be one of the two loads we handle. */
+   if (instr->type != nir_instr_type_intrinsic)
+      return false;
+
+   switch (nir_instr_as_intrinsic(instr)->intrinsic) {
+   case nir_intrinsic_load_simd_width_intel:
+   case nir_intrinsic_load_subgroup_id:
+      return true;
+
+   default:
+      return false;
+   }
+}
+
+/* Lowering callback for brw_nir_lower_simd().  `options` carries the
+ * dispatch width smuggled through the void pointer as an integer.
+ * Returns the replacement SSA value, or NULL to leave the intrinsic
+ * in place for later lowering.
+ */
+static nir_ssa_def *
+lower_simd(nir_builder *b, nir_instr *instr, void *options)
+{
+   uintptr_t simd_width = (uintptr_t)options;
+
+   switch (nir_instr_as_intrinsic(instr)->intrinsic) {
+   case nir_intrinsic_load_simd_width_intel:
+      /* The dispatch width is fixed per compiled variant, so the intrinsic
+       * folds to an immediate.
+       */
+      return nir_imm_int(b, simd_width);
+
+   case nir_intrinsic_load_subgroup_id:
+      /* If the whole workgroup fits in one thread, we can lower subgroup_id
+       * to a constant zero.
+       */
+      if (!b->shader->info.cs.local_size_variable) {
+         unsigned local_workgroup_size = b->shader->info.cs.local_size[0] *
+            b->shader->info.cs.local_size[1] *
+            b->shader->info.cs.local_size[2];
+         if (local_workgroup_size <= simd_width)
+            return nir_imm_int(b, 0);
+      }
+      /* Variable or multi-subgroup workgroups keep the intrinsic. */
+      return NULL;
+
+   default:
+      return NULL;
+   }
+}
+
+/* Replace SIMD-width-dependent intrinsics (SIMD width, subgroup ID) with
+ * constants where possible, for a shader compiled at the given fixed
+ * dispatch width.
+ */
+static void
+brw_nir_lower_simd(nir_shader *nir, unsigned dispatch_width)
+{
+   nir_shader_lower_instructions(nir, filter_simd, lower_simd,
+                                 (void *)(uintptr_t)dispatch_width);
+}
+
static nir_shader *
compile_cs_to_nir(const struct brw_compiler *compiler,
void *mem_ctx,
nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
brw_nir_apply_key(shader, compiler, &key->base, dispatch_width, true);
- NIR_PASS_V(shader, brw_nir_lower_cs_intrinsics, dispatch_width);
+ NIR_PASS_V(shader, brw_nir_lower_simd, dispatch_width);
/* Clean up after the local index and ID calculations. */
NIR_PASS_V(shader, nir_opt_constant_folding);
prog_data->base.total_shared = src_shader->info.cs.shared_size;
prog_data->slm_size = src_shader->num_shared;
- unsigned local_workgroup_size;
- if (prog_data->uses_variable_group_size) {
- prog_data->max_variable_local_size =
- src_shader->info.cs.max_variable_local_size;
- local_workgroup_size = src_shader->info.cs.max_variable_local_size;
+ /* Generate code for all the possible SIMD variants. */
+ bool generate_all;
+
+ unsigned min_dispatch_width;
+ unsigned max_dispatch_width;
+
+ if (src_shader->info.cs.local_size_variable) {
+ generate_all = true;
+ min_dispatch_width = 8;
+ max_dispatch_width = 32;
} else {
+ generate_all = false;
prog_data->local_size[0] = src_shader->info.cs.local_size[0];
prog_data->local_size[1] = src_shader->info.cs.local_size[1];
prog_data->local_size[2] = src_shader->info.cs.local_size[2];
- local_workgroup_size = src_shader->info.cs.local_size[0] *
- src_shader->info.cs.local_size[1] * src_shader->info.cs.local_size[2];
- }
+ unsigned local_workgroup_size = prog_data->local_size[0] *
+ prog_data->local_size[1] *
+ prog_data->local_size[2];
- /* Limit max_threads to 64 for the GPGPU_WALKER command */
- const uint32_t max_threads = MIN2(64, compiler->devinfo->max_cs_threads);
- unsigned min_dispatch_width =
- DIV_ROUND_UP(local_workgroup_size, max_threads);
- min_dispatch_width = MAX2(8, min_dispatch_width);
- min_dispatch_width = util_next_power_of_two(min_dispatch_width);
- assert(min_dispatch_width <= 32);
- unsigned max_dispatch_width = 32;
-
- fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
- fs_visitor *v = NULL;
- const char *fail_msg = NULL;
+ /* Limit max_threads to 64 for the GPGPU_WALKER command */
+ const uint32_t max_threads = MIN2(64, compiler->devinfo->max_cs_threads);
+ min_dispatch_width = util_next_power_of_two(
+ MAX2(8, DIV_ROUND_UP(local_workgroup_size, max_threads)));
+ assert(min_dispatch_width <= 32);
+ max_dispatch_width = 32;
+ }
if ((int)key->base.subgroup_size_type >= (int)BRW_SUBGROUP_SIZE_REQUIRE_8) {
/* These enum values are expressly chosen to be equal to the subgroup
required_dispatch_width == 32);
if (required_dispatch_width < min_dispatch_width ||
required_dispatch_width > max_dispatch_width) {
- fail_msg = "Cannot satisfy explicit subgroup size";
- } else {
- min_dispatch_width = max_dispatch_width = required_dispatch_width;
+ if (error_str) {
+ *error_str = ralloc_strdup(mem_ctx,
+ "Cannot satisfy explicit subgroup size");
+ }
+ return NULL;
}
+ min_dispatch_width = max_dispatch_width = required_dispatch_width;
}
- /* Now the main event: Visit the shader IR and generate our CS IR for it.
- */
- if (!fail_msg && min_dispatch_width <= 8 && max_dispatch_width >= 8) {
+ assert(min_dispatch_width <= max_dispatch_width);
+
+ fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
+ fs_visitor *v = NULL;
+
+ if (likely(!(INTEL_DEBUG & DEBUG_NO8)) &&
+ min_dispatch_width <= 8 && max_dispatch_width >= 8) {
nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 8);
v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
&prog_data->base,
nir8, 8, shader_time_index);
- if (!v8->run_cs(min_dispatch_width)) {
- fail_msg = v8->fail_msg;
- } else {
- /* We should always be able to do SIMD32 for compute shaders */
- assert(v8->max_dispatch_width >= 32);
-
- v = v8;
- prog_data->simd_size = 8;
- cs_fill_push_const_info(compiler->devinfo, prog_data);
+ if (!v8->run_cs(true /* allow_spilling */)) {
+ if (error_str)
+ *error_str = ralloc_strdup(mem_ctx, v8->fail_msg);
+ delete v8;
+ return NULL;
}
+
+ /* We should always be able to do SIMD32 for compute shaders */
+ assert(v8->max_dispatch_width >= 32);
+
+ v = v8;
+ prog_data->prog_mask |= 1 << 0;
+ if (v8->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 0;
+ cs_fill_push_const_info(compiler->devinfo, prog_data);
}
if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
- !fail_msg && min_dispatch_width <= 16 && max_dispatch_width >= 16) {
+ (generate_all || !prog_data->prog_spilled) &&
+ min_dispatch_width <= 16 && max_dispatch_width >= 16) {
/* Try a SIMD16 compile */
nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 16);
if (v8)
v16->import_uniforms(v8);
- if (!v16->run_cs(min_dispatch_width)) {
+ const bool allow_spilling = generate_all || v == NULL;
+ if (!v16->run_cs(allow_spilling)) {
compiler->shader_perf_log(log_data,
"SIMD16 shader failed to compile: %s",
v16->fail_msg);
if (!v) {
- fail_msg =
- "Couldn't generate SIMD16 program and not "
- "enough threads for SIMD8";
+ assert(v8 == NULL);
+ if (error_str) {
+ *error_str = ralloc_asprintf(
+ mem_ctx, "Not enough threads for SIMD8 and "
+ "couldn't generate SIMD16: %s", v16->fail_msg);
+ }
+ delete v16;
+ return NULL;
}
} else {
/* We should always be able to do SIMD32 for compute shaders */
assert(v16->max_dispatch_width >= 32);
v = v16;
- prog_data->simd_size = 16;
+ prog_data->prog_mask |= 1 << 1;
+ if (v16->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 1;
cs_fill_push_const_info(compiler->devinfo, prog_data);
}
}
- /* We should always be able to do SIMD32 for compute shaders */
- assert(!v16 || v16->max_dispatch_width >= 32);
-
- if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32)) &&
- max_dispatch_width >= 32) {
+ /* SIMD32 is only enabled for cases where it is needed, unless forced.
+ *
+ * TODO: Use performance_analysis and drop this boolean.
+ */
+ const bool needs_32 = min_dispatch_width > 16 ||
+ (INTEL_DEBUG & DEBUG_DO32) ||
+ generate_all;
+
+ if (likely(!(INTEL_DEBUG & DEBUG_NO32)) &&
+ (generate_all || !prog_data->prog_spilled) &&
+ needs_32 &&
+ min_dispatch_width <= 32 && max_dispatch_width >= 32) {
/* Try a SIMD32 compile */
nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 32);
else if (v16)
v32->import_uniforms(v16);
- if (!v32->run_cs(min_dispatch_width)) {
+ const bool allow_spilling = generate_all || v == NULL;
+ if (!v32->run_cs(allow_spilling)) {
compiler->shader_perf_log(log_data,
"SIMD32 shader failed to compile: %s",
v32->fail_msg);
if (!v) {
- fail_msg =
- "Couldn't generate SIMD32 program and not "
- "enough threads for SIMD16";
+ assert(v8 == NULL);
+ assert(v16 == NULL);
+ if (error_str) {
+ *error_str = ralloc_asprintf(
+ mem_ctx, "Not enough threads for SIMD16 and "
+ "couldn't generate SIMD32: %s", v32->fail_msg);
+ }
+ delete v32;
+ return NULL;
}
} else {
v = v32;
- prog_data->simd_size = 32;
+ prog_data->prog_mask |= 1 << 2;
+ if (v32->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 2;
cs_fill_push_const_info(compiler->devinfo, prog_data);
}
}
- const unsigned *ret = NULL;
- if (unlikely(v == NULL)) {
- assert(fail_msg);
- if (error_str)
- *error_str = ralloc_strdup(mem_ctx, fail_msg);
- } else {
- fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
- v->runtime_check_aads_emit, MESA_SHADER_COMPUTE);
- if (INTEL_DEBUG & DEBUG_CS) {
- char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
- src_shader->info.label ?
- src_shader->info.label : "unnamed",
- src_shader->info.name);
- g.enable_debug(name);
+ if (unlikely(!v && (INTEL_DEBUG & (DEBUG_NO8 | DEBUG_NO16 | DEBUG_NO32)))) {
+ if (error_str) {
+ *error_str =
+ ralloc_strdup(mem_ctx,
+ "Cannot satisfy INTEL_DEBUG flags SIMD restrictions");
}
+ return NULL;
+ }
+
+ assert(v);
- g.generate_code(v->cfg, prog_data->simd_size, v->shader_stats, stats);
+ const unsigned *ret = NULL;
- ret = g.get_assembly();
+ fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
+ v->runtime_check_aads_emit, MESA_SHADER_COMPUTE);
+ if (INTEL_DEBUG & DEBUG_CS) {
+ char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
+ src_shader->info.label ?
+ src_shader->info.label : "unnamed",
+ src_shader->info.name);
+ g.enable_debug(name);
+ }
+
+ if (generate_all) {
+ if (prog_data->prog_mask & (1 << 0)) {
+ assert(v8);
+ prog_data->prog_offset[0] =
+ g.generate_code(v8->cfg, 8, v8->shader_stats,
+ v8->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+
+ if (prog_data->prog_mask & (1 << 1)) {
+ assert(v16);
+ prog_data->prog_offset[1] =
+ g.generate_code(v16->cfg, 16, v16->shader_stats,
+ v16->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+
+ if (prog_data->prog_mask & (1 << 2)) {
+ assert(v32);
+ prog_data->prog_offset[2] =
+ g.generate_code(v32->cfg, 32, v32->shader_stats,
+ v32->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+ } else {
+ /* Only one dispatch width will be valid, and will be at offset 0,
+ * which is already the default value of prog_offset_* fields.
+ */
+ prog_data->prog_mask = 1 << (v->dispatch_width / 16);
+ g.generate_code(v->cfg, v->dispatch_width, v->shader_stats,
+ v->performance_analysis.require(), stats);
}
+ ret = g.get_assembly();
+
delete v8;
delete v16;
delete v32;
return ret;
}
+/* Pick the SIMD size (8, 16 or 32) to dispatch a compute shader with, given
+ * the workgroup size and the mask of variants that were actually compiled
+ * (cs_prog_data->prog_mask).  At least one variant must be present.
+ *
+ * NOTE(review): the final asserts assume the caller guarantees group_size
+ * fits within the largest compiled variant — only checked by assertion here.
+ */
+unsigned
+brw_cs_simd_size_for_group_size(const struct gen_device_info *devinfo,
+                                const struct brw_cs_prog_data *cs_prog_data,
+                                unsigned group_size)
+{
+   const unsigned mask = cs_prog_data->prog_mask;
+   assert(mask != 0);
+
+   static const unsigned simd8 = 1 << 0;
+   static const unsigned simd16 = 1 << 1;
+   static const unsigned simd32 = 1 << 2;
+
+   /* Debug override: force SIMD32 whenever that variant exists. */
+   if (unlikely(INTEL_DEBUG & DEBUG_DO32) && (mask & simd32))
+      return 32;
+
+   /* Limit max_threads to 64 for the GPGPU_WALKER command */
+   const uint32_t max_threads = MIN2(64, devinfo->max_cs_threads);
+
+   if ((mask & simd8) && group_size <= 8 * max_threads) {
+      /* Prefer SIMD16 if it can be done without spilling.  This matches
+       * the logic in brw_compile_cs.
+       */
+      if ((mask & simd16) && (~cs_prog_data->prog_spilled & simd16))
+         return 16;
+      return 8;
+   }
+
+   if ((mask & simd16) && group_size <= 16 * max_threads)
+      return 16;
+
+   /* SIMD32 is the only remaining option; it must exist and must fit. */
+   assert(mask & simd32);
+   assert(group_size <= 32 * max_threads);
+   return 32;
+}
+
/**
* Test the dispatch mask packing assumptions of
* brw_stage_has_packed_dispatch(). Call this from e.g. the top of