break;
}
- if (inst->src[0].file == IMM) {
- assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
+ break;
+ case BRW_OPCODE_ADD:
+ if (inst->src[1].file != IMM)
+ continue;
+
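+ /* Adding zero is only an identity for integer types: with floats,
+ * -0.0f + 0.0f == +0.0f, so rewriting the ADD as a MOV of src[0]
+ * would change the result for negative zero.
+ */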
+ if (brw_reg_type_is_integer(inst->src[1].type) &&
+ inst->src[1].is_zero()) {
inst->opcode = BRW_OPCODE_MOV;
- inst->src[0].f *= inst->src[1].f;
inst->src[1] = reg_undef;
progress = true;
break;
}
- break;
- case BRW_OPCODE_ADD:
- if (inst->src[1].file != IMM)
- continue;
if (inst->src[0].file == IMM) {
assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
return progress;
}
-/**
- * Optimize sample messages which are followed by the final RT write.
- *
- * CHV, and GEN9+ can mark a texturing SEND instruction with EOT to have its
- * results sent directly to the framebuffer, bypassing the EU. Recognize the
- * final texturing results copied to the framebuffer write payload and modify
- * them to write to the framebuffer directly.
- */
-bool
-fs_visitor::opt_sampler_eot()
-{
- brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
-
- if (stage != MESA_SHADER_FRAGMENT || dispatch_width > 16)
- return false;
-
- if (devinfo->gen != 9 && !devinfo->is_cherryview)
- return false;
-
- /* FINISHME: It should be possible to implement this optimization when there
- * are multiple drawbuffers.
- */
- if (key->nr_color_regions != 1)
- return false;
-
- /* Requires emitting a bunch of saturating MOV instructions during logical
- * send lowering to clamp the color payload, which the sampler unit isn't
- * going to do for us.
- */
- if (key->clamp_fragment_color)
- return false;
-
- /* Look for a texturing instruction immediately before the final FB_WRITE. */
- bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
- fs_inst *fb_write = (fs_inst *)block->end();
- assert(fb_write->eot);
- assert(fb_write->opcode == FS_OPCODE_FB_WRITE_LOGICAL);
-
- /* There wasn't one; nothing to do. */
- if (unlikely(fb_write->prev->is_head_sentinel()))
- return false;
-
- fs_inst *tex_inst = (fs_inst *) fb_write->prev;
-
- /* 3D Sampler » Messages » Message Format
- *
- * “Response Length of zero is allowed on all SIMD8* and SIMD16* sampler
- * messages except sample+killpix, resinfo, sampleinfo, LOD, and gather4*”
- */
- if (tex_inst->opcode != SHADER_OPCODE_TEX_LOGICAL &&
- tex_inst->opcode != SHADER_OPCODE_TXD_LOGICAL &&
- tex_inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
- tex_inst->opcode != SHADER_OPCODE_TXL_LOGICAL &&
- tex_inst->opcode != FS_OPCODE_TXB_LOGICAL &&
- tex_inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL &&
- tex_inst->opcode != SHADER_OPCODE_TXF_CMS_W_LOGICAL &&
- tex_inst->opcode != SHADER_OPCODE_TXF_UMS_LOGICAL)
- return false;
-
- /* XXX - This shouldn't be necessary. */
- if (tex_inst->prev->is_head_sentinel())
- return false;
-
- /* Check that the FB write sources are fully initialized by the single
- * texturing instruction.
- */
- for (unsigned i = 0; i < FB_WRITE_LOGICAL_NUM_SRCS; i++) {
- if (i == FB_WRITE_LOGICAL_SRC_COLOR0) {
- if (!fb_write->src[i].equals(tex_inst->dst) ||
- fb_write->size_read(i) != tex_inst->size_written)
- return false;
- } else if (i != FB_WRITE_LOGICAL_SRC_COMPONENTS) {
- if (fb_write->src[i].file != BAD_FILE)
- return false;
- }
- }
-
- assert(!tex_inst->eot); /* We can't get here twice */
- assert((tex_inst->offset & (0xff << 24)) == 0);
-
- const fs_builder ibld(this, block, tex_inst);
-
- tex_inst->offset |= fb_write->target << 24;
- tex_inst->eot = true;
- tex_inst->dst = ibld.null_reg_ud();
- tex_inst->size_written = 0;
- fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
-
- /* Marking EOT is sufficient, lower_logical_sends() will notice the EOT
- * flag and submit a header together with the sampler message as required
- * by the hardware.
- */
- invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
- return true;
-}
-
bool
fs_visitor::opt_register_renaming()
{
high.offset = inst->dst.offset % REG_SIZE;
if (devinfo->gen >= 7) {
- if (inst->src[1].abs)
+ /* From GEN:BUG:1604601757:
+ *
+ * "When multiplying a DW and any lower precision integer, source modifier
+ * is not supported."
+ *
+ * An unsupported negate modifier on src[1] would ordinarily be
+ * lowered by the subsequent lower_regioning pass. In this case that
+ * pass would spawn another dword multiply. Instead, lower the
+ * modifier first.
+ */
+ const bool source_mods_unsupported = (devinfo->gen >= 12);
+
+ if (inst->src[1].abs || (inst->src[1].negate &&
+ source_mods_unsupported))
lower_src_modifiers(this, block, inst, 1);
if (inst->src[1].file == IMM) {
if (coord_components > 0 &&
(has_lod || shadow_c.file != BAD_FILE ||
(op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
- for (unsigned i = coord_components; i < 3; i++)
+ assert(coord_components <= 3);
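+ /* msg_end already points past the coordinate components, so the
+ * zero padding for the missing components starts at offset 0 from
+ * it.
+ */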
+ for (unsigned i = 0; i < 3 - coord_components; i++)
bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
msg_end = offset(msg_end, bld, 3 - coord_components);
assert(dispatch_width % payload_width == 0);
assert(devinfo->gen >= 6);
- prog_data->uses_src_depth = prog_data->uses_src_w =
- (nir->info.system_values_read & (1ull << SYSTEM_VALUE_FRAG_COORD)) != 0;
-
- prog_data->uses_sample_mask =
- (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
-
- /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
- *
- * "MSDISPMODE_PERSAMPLE is required in order to select
- * POSOFFSET_SAMPLE"
- *
- * So we can only really get sample positions if we are doing real
- * per-sample dispatch. If we need gl_SamplePosition and we don't have
- * persample dispatch, we hard-code it to 0.5.
- */
- prog_data->uses_pos_offset = prog_data->persample_dispatch &&
- (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
-
/* R0: PS thread payload header. */
payload.num_regs++;
OPT(lower_simd_width);
OPT(lower_barycentrics);
-
- /* After SIMD lowering just in case we had to unroll the EOT send. */
- OPT(opt_sampler_eot);
-
OPT(lower_logical_sends);
/* After logical SEND lowering. */
{
prog_data->flat_inputs = 0;
- nir_foreach_variable(var, &shader->inputs) {
+ nir_foreach_shader_in_variable(var, shader) {
unsigned slots = glsl_count_attribute_slots(var->type, false);
for (unsigned s = 0; s < slots; s++) {
int input_index = prog_data->urb_setup[var->data.location + s];
*
* This should be replaced by global value numbering someday.
*/
-static bool
-move_interpolation_to_top(nir_shader *nir)
+bool
+brw_nir_move_interpolation_to_top(nir_shader *nir)
{
bool progress = false;
*
* Useful when rendering to a non-multisampled buffer.
*/
-static bool
-demote_sample_qualifiers(nir_shader *nir)
+bool
+brw_nir_demote_sample_qualifiers(nir_shader *nir)
{
bool progress = true;
return progress;
}
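+
+/**
+ * Fill in the parts of brw_wm_prog_data that can be computed up front
+ * from the NIR shader and the compile key, before the backend visitors
+ * run.
+ */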
+void
+brw_nir_populate_wm_prog_data(const nir_shader *shader,
+ const struct gen_device_info *devinfo,
+ const struct brw_wm_prog_key *key,
+ struct brw_wm_prog_data *prog_data)
+{
+ prog_data->uses_src_depth = prog_data->uses_src_w =
+ shader->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_FRAG_COORD);
+
+ /* key->alpha_test_func means simulating alpha testing via discards,
+ * so the shader definitely kills pixels.
+ */
+ prog_data->uses_kill = shader->info.fs.uses_discard ||
+ key->alpha_test_func;
+ prog_data->uses_omask = !key->ignore_sample_mask_out &&
+ (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
+ prog_data->computed_depth_mode = computed_depth_mode(shader);
+ prog_data->computed_stencil =
+ shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
+
+ prog_data->persample_dispatch =
+ key->multisample_fbo &&
+ (key->persample_interp ||
+ (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
+ SYSTEM_BIT_SAMPLE_POS)) ||
+ shader->info.fs.uses_sample_qualifier ||
+ shader->info.outputs_read);
+
+ if (devinfo->gen >= 6) {
+ prog_data->uses_sample_mask =
+ shader->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN;
+
+ /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
+ *
+ * "MSDISPMODE_PERSAMPLE is required in order to select
+ * POSOFFSET_SAMPLE"
+ *
+ * So we can only really get sample positions if we are doing real
+ * per-sample dispatch. If we need gl_SamplePosition and we don't have
+ * persample dispatch, we hard-code it to 0.5.
+ */
+ prog_data->uses_pos_offset = prog_data->persample_dispatch &&
+ (shader->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
+ }
+
+ prog_data->has_render_target_reads = shader->info.outputs_read != 0ull;
+
+ prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
+ prog_data->post_depth_coverage = shader->info.fs.post_depth_coverage;
+ prog_data->inner_coverage = shader->info.fs.inner_coverage;
+
+ prog_data->barycentric_interp_modes =
+ brw_compute_barycentric_interp_modes(devinfo, shader);
+
+ calculate_urb_setup(devinfo, key, prog_data, shader);
+ brw_compute_flat_inputs(prog_data, shader);
+}
+
/**
* Pre-gen6, the register file of the EUs was shared between threads,
* and each thread used some subset allocated on a 16-register block
}
if (!key->multisample_fbo)
- NIR_PASS_V(shader, demote_sample_qualifiers);
- NIR_PASS_V(shader, move_interpolation_to_top);
+ NIR_PASS_V(shader, brw_nir_demote_sample_qualifiers);
+ NIR_PASS_V(shader, brw_nir_move_interpolation_to_top);
brw_postprocess_nir(shader, compiler, true);
- /* key->alpha_test_func means simulating alpha testing via discards,
- * so the shader definitely kills pixels.
- */
- prog_data->uses_kill = shader->info.fs.uses_discard ||
- key->alpha_test_func;
- prog_data->uses_omask = key->multisample_fbo &&
- shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
- prog_data->computed_depth_mode = computed_depth_mode(shader);
- prog_data->computed_stencil =
- shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
-
- prog_data->persample_dispatch =
- key->multisample_fbo &&
- (key->persample_interp ||
- (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
- SYSTEM_BIT_SAMPLE_POS)) ||
- shader->info.fs.uses_sample_qualifier ||
- shader->info.outputs_read);
-
- prog_data->has_render_target_reads = shader->info.outputs_read != 0ull;
-
- prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
- prog_data->post_depth_coverage = shader->info.fs.post_depth_coverage;
- prog_data->inner_coverage = shader->info.fs.inner_coverage;
-
- prog_data->barycentric_interp_modes =
- brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
-
- calculate_urb_setup(devinfo, key, prog_data, shader);
- brw_compute_flat_inputs(prog_data, shader);
+ brw_nir_populate_wm_prog_data(shader, compiler->devinfo, key, prog_data);
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL;
}
}
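+ /* Distinguish SIMD16 merely being skipped (e.g. INTEL_DEBUG=no16) from
+ * a SIMD16 compile that was attempted and failed; only the latter
+ * should block the SIMD32 attempt below.
+ */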
+ const bool simd16_failed = v16 && !simd16_cfg;
+
/* Currently, the compiler only supports SIMD32 on SNB+ */
if (!has_spilled &&
v8->max_dispatch_width >= 32 && !use_rep_send &&
- devinfo->gen >= 6 && simd16_cfg &&
+ devinfo->gen >= 6 && !simd16_failed &&
!(INTEL_DEBUG & DEBUG_NO32)) {
/* Try a SIMD32 compile */
v32 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
prog_data->base.total_shared = src_shader->info.cs.shared_size;
prog_data->slm_size = src_shader->num_shared;
- unsigned local_workgroup_size;
+ /* Whether to generate code for all possible SIMD variants.  With a
+ * variable workgroup size, the required dispatch width is not known
+ * until dispatch time, so every variant has to be available.
+ */
+ bool generate_all;
+
+ unsigned min_dispatch_width;
+ unsigned max_dispatch_width;
+
if (src_shader->info.cs.local_size_variable) {
- local_workgroup_size = src_shader->info.cs.max_variable_local_size;
+ generate_all = true;
+ min_dispatch_width = 8;
+ max_dispatch_width = 32;
} else {
+ generate_all = false;
prog_data->local_size[0] = src_shader->info.cs.local_size[0];
prog_data->local_size[1] = src_shader->info.cs.local_size[1];
prog_data->local_size[2] = src_shader->info.cs.local_size[2];
- local_workgroup_size = src_shader->info.cs.local_size[0] *
- src_shader->info.cs.local_size[1] * src_shader->info.cs.local_size[2];
- }
+ unsigned local_workgroup_size = prog_data->local_size[0] *
+ prog_data->local_size[1] *
+ prog_data->local_size[2];
- /* Limit max_threads to 64 for the GPGPU_WALKER command */
- const uint32_t max_threads = MIN2(64, compiler->devinfo->max_cs_threads);
- unsigned min_dispatch_width =
- DIV_ROUND_UP(local_workgroup_size, max_threads);
- min_dispatch_width = MAX2(8, min_dispatch_width);
- min_dispatch_width = util_next_power_of_two(min_dispatch_width);
- assert(min_dispatch_width <= 32);
- unsigned max_dispatch_width = 32;
+ /* Limit max_threads to 64 for the GPGPU_WALKER command */
+ const uint32_t max_threads = MIN2(64, compiler->devinfo->max_cs_threads);
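+ /* A SIMD-N thread handles N invocations, so the narrowest usable
+ * width is the one that still fits the workgroup in max_threads
+ * threads, clamped to at least 8 and rounded up to a power of two.
+ */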
+ min_dispatch_width = util_next_power_of_two(
+ MAX2(8, DIV_ROUND_UP(local_workgroup_size, max_threads)));
+ assert(min_dispatch_width <= 32);
+ max_dispatch_width = 32;
+ }
if ((int)key->base.subgroup_size_type >= (int)BRW_SUBGROUP_SIZE_REQUIRE_8) {
/* These enum values are expressly chosen to be equal to the subgroup
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
fs_visitor *v = NULL;
- const char *fail_msg = NULL;
- /* Now the main event: Visit the shader IR and generate our CS IR for it.
- */
if (likely(!(INTEL_DEBUG & DEBUG_NO8)) &&
min_dispatch_width <= 8 && max_dispatch_width >= 8) {
nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
&prog_data->base,
nir8, 8, shader_time_index);
if (!v8->run_cs(true /* allow_spilling */)) {
- fail_msg = v8->fail_msg;
- } else {
- /* We should always be able to do SIMD32 for compute shaders */
- assert(v8->max_dispatch_width >= 32);
-
- v = v8;
- prog_data->simd_size = 8;
- cs_fill_push_const_info(compiler->devinfo, prog_data);
+ if (error_str)
+ *error_str = ralloc_strdup(mem_ctx, v8->fail_msg);
+ delete v8;
+ return NULL;
}
+
+ /* We should always be able to do SIMD32 for compute shaders */
+ assert(v8->max_dispatch_width >= 32);
+
+ v = v8;
+ prog_data->prog_mask |= 1 << 0;
+ if (v8->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 0;
+ cs_fill_push_const_info(compiler->devinfo, prog_data);
}
- if ((!v || !v->spilled_any_registers) &&
- likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
- !fail_msg && min_dispatch_width <= 16 && max_dispatch_width >= 16) {
+ if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
+ (generate_all || !prog_data->prog_spilled) &&
+ min_dispatch_width <= 16 && max_dispatch_width >= 16) {
/* Try a SIMD16 compile */
nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 16);
if (v8)
v16->import_uniforms(v8);
- if (!v16->run_cs(v == NULL /* allow_spilling */)) {
+ const bool allow_spilling = generate_all || v == NULL;
+ if (!v16->run_cs(allow_spilling)) {
compiler->shader_perf_log(log_data,
"SIMD16 shader failed to compile: %s",
v16->fail_msg);
if (!v) {
- fail_msg =
- "Couldn't generate SIMD16 program and not "
- "enough threads for SIMD8";
+ assert(v8 == NULL);
+ if (error_str) {
+ *error_str = ralloc_asprintf(
+ mem_ctx, "Not enough threads for SIMD8 and "
+ "couldn't generate SIMD16: %s", v16->fail_msg);
+ }
+ delete v16;
+ return NULL;
}
} else {
/* We should always be able to do SIMD32 for compute shaders */
assert(v16->max_dispatch_width >= 32);
v = v16;
- prog_data->simd_size = 16;
+ prog_data->prog_mask |= 1 << 1;
+ if (v16->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 1;
cs_fill_push_const_info(compiler->devinfo, prog_data);
}
}
+ /* SIMD32 is only generated when it is actually needed, unless it is
+ * forced via INTEL_DEBUG=do32.
+ *
+ * TODO: Use performance_analysis and drop this boolean.
+ */
+ const bool needs_32 = v == NULL ||
+ (INTEL_DEBUG & DEBUG_DO32) ||
+ generate_all;
+
if (likely(!(INTEL_DEBUG & DEBUG_NO32)) &&
- (!v || !v->spilled_any_registers) &&
- !fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32)) &&
- max_dispatch_width >= 32) {
+ (generate_all || !prog_data->prog_spilled) &&
+ needs_32 &&
+ min_dispatch_width <= 32 && max_dispatch_width >= 32) {
/* Try a SIMD32 compile */
nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 32);
else if (v16)
v32->import_uniforms(v16);
- if (!v32->run_cs(v == NULL /* allow_spilling */)) {
+ const bool allow_spilling = generate_all || v == NULL;
+ if (!v32->run_cs(allow_spilling)) {
compiler->shader_perf_log(log_data,
"SIMD32 shader failed to compile: %s",
v32->fail_msg);
if (!v) {
- fail_msg =
- "Couldn't generate SIMD32 program and not "
- "enough threads for SIMD16";
+ assert(v8 == NULL);
+ assert(v16 == NULL);
+ if (error_str) {
+ *error_str = ralloc_asprintf(
+ mem_ctx, "Not enough threads for SIMD16 and "
+ "couldn't generate SIMD32: %s", v32->fail_msg);
+ }
+ delete v32;
+ return NULL;
}
} else {
v = v32;
- prog_data->simd_size = 32;
+ prog_data->prog_mask |= 1 << 2;
+ if (v32->spilled_any_registers)
+ prog_data->prog_spilled |= 1 << 2;
cs_fill_push_const_info(compiler->devinfo, prog_data);
}
}
assert(v);
const unsigned *ret = NULL;
- if (unlikely(v == NULL)) {
- assert(fail_msg);
- if (error_str)
- *error_str = ralloc_strdup(mem_ctx, fail_msg);
- } else {
- fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
- v->runtime_check_aads_emit, MESA_SHADER_COMPUTE);
- if (INTEL_DEBUG & DEBUG_CS) {
- char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
- src_shader->info.label ?
- src_shader->info.label : "unnamed",
- src_shader->info.name);
- g.enable_debug(name);
- }
- g.generate_code(v->cfg, prog_data->simd_size, v->shader_stats,
+ fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
+ v->runtime_check_aads_emit, MESA_SHADER_COMPUTE);
+ if (INTEL_DEBUG & DEBUG_CS) {
+ char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
+ src_shader->info.label ?
+ src_shader->info.label : "unnamed",
+ src_shader->info.name);
+ g.enable_debug(name);
+ }
+
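+ /* If the caller passed an array of brw_compile_stats, step through it
+ * so that each generated SIMD variant gets its own entry.
+ */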
+ if (generate_all) {
+ if (prog_data->prog_mask & (1 << 0)) {
+ assert(v8);
+ prog_data->prog_offset[0] =
+ g.generate_code(v8->cfg, 8, v8->shader_stats,
+ v8->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+
+ if (prog_data->prog_mask & (1 << 1)) {
+ assert(v16);
+ prog_data->prog_offset[1] =
+ g.generate_code(v16->cfg, 16, v16->shader_stats,
+ v16->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+
+ if (prog_data->prog_mask & (1 << 2)) {
+ assert(v32);
+ prog_data->prog_offset[2] =
+ g.generate_code(v32->cfg, 32, v32->shader_stats,
+ v32->performance_analysis.require(), stats);
+ stats = stats ? stats + 1 : NULL;
+ }
+ } else {
+ /* Only one dispatch width will be valid, and it will be at offset 0,
+ * which is already the default value of the prog_offset[] entries.
+ */
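+ /* dispatch_width 8, 16 and 32 map to prog_mask bits 0, 1 and 2. */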
+ prog_data->prog_mask = 1 << (v->dispatch_width / 16);
+ g.generate_code(v->cfg, v->dispatch_width, v->shader_stats,
v->performance_analysis.require(), stats);
-
- ret = g.get_assembly();
}
+ ret = g.get_assembly();
+
delete v8;
delete v16;
delete v32;
return ret;
}
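+
+/**
+ * Pick the dispatch width for a given workgroup size out of the SIMD
+ * variants that were actually compiled (see prog_mask in brw_compile_cs).
+ *
+ * A hypothetical driver-side use at dispatch time (the group-size names
+ * here are assumptions, not part of this patch):
+ *
+ *    unsigned simd = brw_cs_simd_size_for_group_size(devinfo, cs_prog_data,
+ *                                                    x * y * z);
+ *    uint32_t offset = cs_prog_data->prog_offset[simd / 16];
+ */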
+unsigned
+brw_cs_simd_size_for_group_size(const struct gen_device_info *devinfo,
+ const struct brw_cs_prog_data *cs_prog_data,
+ unsigned group_size)
+{
+ const unsigned mask = cs_prog_data->prog_mask;
+ assert(mask != 0);
+
+ static const unsigned simd8 = 1 << 0;
+ static const unsigned simd16 = 1 << 1;
+ static const unsigned simd32 = 1 << 2;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_DO32) && (mask & simd32))
+ return 32;
+
+ /* Limit max_threads to 64 for the GPGPU_WALKER command */
+ const uint32_t max_threads = MIN2(64, devinfo->max_cs_threads);
+
+ if ((mask & simd8) && group_size <= 8 * max_threads) {
+ /* Prefer SIMD16 if it compiled without spilling.  Matches the logic
+ * in brw_compile_cs.
+ */
+ if ((mask & simd16) && (~cs_prog_data->prog_spilled & simd16))
+ return 16;
+ return 8;
+ }
+
+ if ((mask & simd16) && group_size <= 16 * max_threads)
+ return 16;
+
+ assert(mask & simd32);
+ assert(group_size <= 32 * max_threads);
+ return 32;
+}
+
/**
* Test the dispatch mask packing assumptions of
* brw_stage_has_packed_dispatch(). Call this from e.g. the top of