   bool opt_drop_redundant_mov_to_flags();
   bool opt_register_renaming();
   bool opt_bank_conflicts();
-  unsigned bank_conflict_cycles(const fs_inst *inst) const;
   bool register_coalesce();
   bool compute_to_mrf();
   bool eliminate_find_live_channel();
}
/**
- * Estimate the number of GRF bank conflict cycles incurred by an instruction.
+ * Return whether the instruction incurs GRF bank conflict cycles.
 *
- * Note that this neglects conflict cycles prior to register allocation
- * because we don't know which bank each VGRF is going to end up aligned to.
+ * Note that this is only accurate after register allocation because otherwise
+ * we don't know which bank each VGRF is going to end up aligned to.
 */
-unsigned
-fs_visitor::bank_conflict_cycles(const fs_inst *inst) const
+bool
+has_bank_conflict(const gen_device_info *devinfo, const fs_inst *inst)
{
-   if (grf_used && inst->is_3src(devinfo) &&
-       is_grf(inst->src[1]) && is_grf(inst->src[2]) &&
-       bank_of(reg_of(inst->src[1])) == bank_of(reg_of(inst->src[2])) &&
-       !is_conflict_optimized_out(devinfo, inst)) {
-      return DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE);
-   } else {
-      return 0;
-   }
+   return inst->is_3src(devinfo) &&
+          is_grf(inst->src[1]) && is_grf(inst->src[2]) &&
+          bank_of(reg_of(inst->src[1])) == bank_of(reg_of(inst->src[2])) &&
+          !is_conflict_optimized_out(devinfo, inst);
}
}
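
The new predicate only compares the bank parity of the second and third
sources of a 3-source instruction. As a rough standalone illustration of
that check, the sketch below models bank_of() under the two-bank GRF layout
the conflict pass appears to use (bit 1 of the register number selecting the
bank); the helper body and the register numbers here are illustrative
assumptions, not code lifted from brw_fs_bank_conflicts.cpp.

#include <stdio.h>

/* Assumed two-bank GRF layout: register pairs alternate banks, so bit 1
 * of the register number selects the bank. */
static unsigned bank_of(unsigned reg)
{
   return (reg >> 1) & 1;
}

int main(void)
{
   /* r10 and r14 map to the same bank, so reading them as src1/src2 of a
    * 3-source instruction would conflict. */
   printf("r10 vs r14: %s\n",
          bank_of(10) == bank_of(14) ? "conflict" : "no conflict");
   /* r10 and r12 map to different banks: no conflict. */
   printf("r10 vs r12: %s\n",
          bank_of(10) == bank_of(12) ? "conflict" : "no conflict");
   return 0;
}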
int
-fs_instruction_scheduler::issue_time(backend_instruction *inst)
+fs_instruction_scheduler::issue_time(backend_instruction *inst0)
{
-   const unsigned overhead = v->bank_conflict_cycles((fs_inst *)inst);
-   if (is_compressed((fs_inst *)inst))
+   const fs_inst *inst = static_cast<fs_inst *>(inst0);
+   const unsigned overhead = v->grf_used && has_bank_conflict(v->devinfo, inst) ?
+      DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE) : 0;
+   if (is_compressed(inst))
      return 4 + overhead;
   else
      return 2 + overhead;
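
For a sense of the cost model: the scheduler now charges one extra issue
cycle per destination GRF whenever has_bank_conflict() fires. For example,
a SIMD16 instruction with 32-bit components writes 16 * 4 = 64 bytes, i.e.
two 32-byte GRFs, so a conflicting 3-source instruction issues in 4 + 2 = 6
cycles instead of 4. A minimal sketch of that arithmetic follows; REG_SIZE,
the hard-coded widths and the unit destination stride are illustrative
assumptions, not values taken from the headers.

#include <stdbool.h>
#include <stdio.h>

#define REG_SIZE 32   /* Bytes per GRF, assumed. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Models issue_time() above for a destination of exec_size channels of
 * type_size bytes each, assuming a destination stride of 1. */
static unsigned issue_time(unsigned exec_size, unsigned type_size,
                           bool compressed, bool bank_conflict)
{
   const unsigned overhead = bank_conflict ?
      DIV_ROUND_UP(exec_size * type_size, REG_SIZE) : 0;
   return (compressed ? 4 : 2) + overhead;
}

int main(void)
{
   /* SIMD16, 32-bit: 64B destination = 2 GRFs -> 4 + 2 cycles. */
   printf("SIMD16 conflict: %u cycles\n", issue_time(16, 4, true, true));
   /* SIMD8, 32-bit: 32B destination = 1 GRF -> 2 + 1 cycles. */
   printf("SIMD8 conflict:  %u cycles\n", issue_time(8, 4, false, true));
   return 0;
}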