#include <algorithm>
#include <map>
+#include <set>
+#include <stack>
+#include <math.h>
#include "aco_ir.h"
#include "vulkan/radv_shader.h"
/**
* The general idea of this pass is:
- * The CFG is traversed in reverse postorder (forward).
- * Per BB one wait_ctx is maintained.
+ * The CFG is traversed in reverse postorder (forward) and loops are processed
+ * several times until no progress is made.
+ * Per BB, two wait_ctx are maintained: an in-context and an out-context.
* The in-context is the joined out-contexts of the predecessors.
* The context contains a map: gpr -> wait_entry
* consisting of the information about the cnt values to be waited for.
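+ * If an instruction accesses a gpr which has an entry in the map, an
+ * s_waitcnt with the entry's imm values is inserted before it.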
event_exp_mrt_null = 1 << 8,
event_gds_gpr_lock = 1 << 9,
event_vmem_gpr_lock = 1 << 10,
+ event_sendmsg = 1 << 11,
+ num_events = 12,
};
enum counter_type : uint8_t {
counter_lgkm = 1 << 1,
counter_vm = 1 << 2,
counter_vs = 1 << 3,
+ num_counters = 4,
};
static const uint16_t exp_events = event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock;
-static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat;
+static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
static const uint16_t vm_events = event_vmem | event_flat;
static const uint16_t vs_events = event_vmem_store;
case event_smem:
case event_lds:
case event_gds:
+ case event_sendmsg:
return counter_lgkm;
case event_vmem:
return counter_vm;
}
}
+uint16_t get_events_for_counter(counter_type ctr)
+{
+ switch (ctr) {
+ case counter_exp:
+ return exp_events;
+ case counter_lgkm:
+ return lgkm_events;
+ case counter_vm:
+ return vm_events;
+ case counter_vs:
+ return vs_events;
+ }
+ return 0;
+}
+
struct wait_imm {
static const uint8_t unset_counter = 0xff;
wait_imm(uint16_t vm_, uint16_t exp_, uint16_t lgkm_, uint16_t vs_) :
vm(vm_), exp(exp_), lgkm(lgkm_), vs(vs_) {}
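+ /* unpack a hardware s_waitcnt immediate: vmcnt is bits [3:0] (plus
+ * [15:14] on GFX9+), expcnt is bits [6:4] and lgkmcnt is bits [11:8]
+ * ([13:8] on GFX10+); vscnt has no field here, it is waited on with the
+ * separate s_waitcnt_vscnt instruction */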
+ wait_imm(enum chip_class chip, uint16_t packed) : vs(unset_counter)
+ {
+ vm = packed & 0xf;
+ if (chip >= GFX9)
+ vm |= (packed >> 10) & 0x30;
+
+ exp = (packed >> 4) & 0x7;
+
+ lgkm = (packed >> 8) & 0xf;
+ if (chip >= GFX10)
+ lgkm |= (packed >> 8) & 0x30;
+ }
+
uint16_t pack(enum chip_class chip) const
{
uint16_t imm = 0;
assert(exp == unset_counter || exp <= 0x7);
switch (chip) {
case GFX10:
+ case GFX10_3:
assert(lgkm == unset_counter || lgkm <= 0x3f);
assert(vm == unset_counter || vm <= 0x3f);
imm = ((vm & 0x30) << 10) | ((lgkm & 0x3f) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
return imm;
}
- void combine(const wait_imm& other)
+ bool combine(const wait_imm& other)
{
+ bool changed = other.vm < vm || other.exp < exp || other.lgkm < lgkm || other.vs < vs;
vm = std::min(vm, other.vm);
exp = std::min(exp, other.exp);
lgkm = std::min(lgkm, other.lgkm);
vs = std::min(vs, other.vs);
+ return changed;
}
bool empty() const
uint8_t counters; /* use counter_type notion */
bool wait_on_read:1;
bool logical:1;
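+ /* VMEM operations with and without a sampler only return in-order with
+ * operations of the same kind, so the entry tracks which kinds are pending */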
+ bool has_vmem_nosampler:1;
+ bool has_vmem_sampler:1;
wait_entry(wait_event event, wait_imm imm, bool logical, bool wait_on_read)
: imm(imm), events(event), counters(get_counters_for_event(event)),
- wait_on_read(wait_on_read), logical(logical) {}
+ wait_on_read(wait_on_read), logical(logical),
+ has_vmem_nosampler(false), has_vmem_sampler(false) {}
- void join(const wait_entry& other)
+ bool join(const wait_entry& other)
{
+ bool changed = (other.events & ~events) ||
+ (other.counters & ~counters) ||
+ (other.wait_on_read && !wait_on_read) ||
+ (other.has_vmem_nosampler && !has_vmem_nosampler) ||
+ (other.has_vmem_sampler && !has_vmem_sampler);
events |= other.events;
counters |= other.counters;
- imm.combine(other.imm);
- wait_on_read = wait_on_read || other.wait_on_read;
+ changed |= imm.combine(other.imm);
+ wait_on_read |= other.wait_on_read;
+ has_vmem_nosampler |= other.has_vmem_nosampler;
+ has_vmem_sampler |= other.has_vmem_sampler;
assert(logical == other.logical);
+ return changed;
}
void remove_counter(counter_type counter)
if (counter == counter_lgkm) {
imm.lgkm = wait_imm::unset_counter;
- events &= ~(event_smem | event_lds | event_gds);
+ events &= ~(event_smem | event_lds | event_gds | event_sendmsg);
}
if (counter == counter_vm) {
imm.vm = wait_imm::unset_counter;
events &= ~event_vmem;
+ has_vmem_nosampler = false;
+ has_vmem_sampler = false;
}
if (counter == counter_exp) {
uint8_t vs_cnt = 0;
bool pending_flat_lgkm = false;
bool pending_flat_vm = false;
+ bool pending_s_buffer_store = false; /* GFX10 workaround */
- wait_imm barrier_imm[barrier_count];
+ wait_imm barrier_imm[storage_count];
+ uint16_t barrier_events[storage_count] = {}; /* use wait_event notion */
std::map<PhysReg,wait_entry> gpr_map;
+ /* used for vmem/smem scores */
+ bool collect_statistics;
+ Instruction *gen_instr;
+ std::map<Instruction *, unsigned> unwaited_instrs[num_counters];
+ std::map<PhysReg,std::set<Instruction *>> reg_instrs[num_counters];
+ std::vector<unsigned> wait_distances[num_events];
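+ /* unwaited_instrs: per counter, outstanding instructions and the number of
+ * instructions issued since; reg_instrs: per counter, which outstanding
+ * instructions a gpr waits on; wait_distances: per event, the distances at
+ * which a wait was eventually needed */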
+
wait_ctx() {}
wait_ctx(Program *program_)
: program(program_),
max_exp_cnt(6),
max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
- unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)) {}
+ unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)),
+ collect_statistics(program_->collect_statistics) {}
- void join(const wait_ctx* other, bool logical)
+ bool join(const wait_ctx* other, bool logical)
{
+ bool changed = other->exp_cnt > exp_cnt ||
+ other->vm_cnt > vm_cnt ||
+ other->lgkm_cnt > lgkm_cnt ||
+ other->vs_cnt > vs_cnt ||
+ (other->pending_flat_lgkm && !pending_flat_lgkm) ||
+ (other->pending_flat_vm && !pending_flat_vm);
+
exp_cnt = std::max(exp_cnt, other->exp_cnt);
vm_cnt = std::max(vm_cnt, other->vm_cnt);
lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
vs_cnt = std::max(vs_cnt, other->vs_cnt);
pending_flat_lgkm |= other->pending_flat_lgkm;
pending_flat_vm |= other->pending_flat_vm;
+ pending_s_buffer_store |= other->pending_s_buffer_store;
for (std::pair<PhysReg,wait_entry> entry : other->gpr_map)
{
if (entry.second.logical != logical)
continue;
- if (it != gpr_map.end())
- it->second.join(entry.second);
- else
+ if (it != gpr_map.end()) {
+ changed |= it->second.join(entry.second);
+ } else {
gpr_map.insert(entry);
+ changed = true;
+ }
+ }
+
+ for (unsigned i = 0; i < storage_count; i++) {
+ changed |= barrier_imm[i].combine(other->barrier_imm[i]);
+ changed |= other->barrier_events[i] & ~barrier_events[i];
+ barrier_events[i] |= other->barrier_events[i];
}
- for (unsigned i = 0; i < barrier_count; i++)
- barrier_imm[i].combine(other->barrier_imm[i]);
+ /* these are used for statistics, so don't update "changed" */
+ for (unsigned i = 0; i < num_counters; i++) {
+ for (std::pair<Instruction *, unsigned> instr : other->unwaited_instrs[i]) {
+ auto pos = unwaited_instrs[i].find(instr.first);
+ if (pos == unwaited_instrs[i].end())
+ unwaited_instrs[i].insert(instr);
+ else
+ pos->second = std::min(pos->second, instr.second);
+ }
+ /* don't use a foreach loop to avoid copies */
+ for (auto it = other->reg_instrs[i].begin(); it != other->reg_instrs[i].end(); ++it)
+ reg_instrs[i][it->first].insert(it->second.begin(), it->second.end());
+ }
+
+ return changed;
+ }
+
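+ /* removes the counter from the entry; when statistics are collected, the
+ * wait distances of the instructions that are now being waited on are
+ * recorded first */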
+ void wait_and_remove_from_entry(PhysReg reg, wait_entry& entry, counter_type counter) {
+ if (collect_statistics && (entry.counters & counter)) {
+ unsigned counter_idx = ffs(counter) - 1;
+ for (Instruction *instr : reg_instrs[counter_idx][reg]) {
+ auto pos = unwaited_instrs[counter_idx].find(instr);
+ if (pos == unwaited_instrs[counter_idx].end())
+ continue;
+
+ unsigned distance = pos->second;
+ unsigned events = entry.events & get_events_for_counter(counter);
+ while (events) {
+ unsigned event_idx = u_bit_scan(&events);
+ wait_distances[event_idx].push_back(distance);
+ }
+
+ unwaited_instrs[counter_idx].erase(instr);
+ }
+ reg_instrs[counter_idx][reg].clear();
+ }
+
+ entry.remove_counter(counter);
+ }
+
+ void advance_unwaited_instrs()
+ {
+ for (unsigned i = 0; i < num_counters; i++) {
+ for (auto it = unwaited_instrs[i].begin(); it != unwaited_instrs[i].end(); ++it)
+ it->second++;
+ }
}
};
continue;
/* Vector Memory reads and writes return in the order they were issued */
- if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem)) {
- it->second.remove_counter(counter_vm);
- if (!it->second.counters)
- it = ctx.gpr_map.erase(it);
+ bool has_sampler = instr->format == Format::MIMG && !instr->operands[1].isUndefined() && instr->operands[1].regClass() == s4;
+ if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem) &&
+ it->second.has_vmem_nosampler == !has_sampler && it->second.has_vmem_sampler == has_sampler)
continue;
- }
/* LDS reads and writes return in the order they were issued. same for GDS */
if (instr->format == Format::DS) {
bool gds = static_cast<DS_instruction*>(instr)->gds;
- if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds)) {
- it->second.remove_counter(counter_lgkm);
- if (!it->second.counters)
- it = ctx.gpr_map.erase(it);
+ if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds))
continue;
- }
}
wait.combine(it->second.imm);
return wait;
}
-wait_imm kill(Instruction* instr, wait_ctx& ctx)
+wait_imm parse_wait_instr(wait_ctx& ctx, Instruction *instr)
+{
+ if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
+ instr->definitions[0].physReg() == sgpr_null) {
+ wait_imm imm;
+ imm.vs = std::min<uint8_t>(imm.vs, static_cast<SOPK_instruction*>(instr)->imm);
+ return imm;
+ } else if (instr->opcode == aco_opcode::s_waitcnt) {
+ return wait_imm(ctx.chip_class, static_cast<SOPP_instruction*>(instr)->imm);
+ }
+ return wait_imm();
+}
+
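+/* computes the wait needed by a barrier with the given semantics: everything
+ * pending for the affected storage classes has to finish, except when the
+ * synchronization scope doesn't extend beyond a single wave */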
+wait_imm perform_barrier(wait_ctx& ctx, memory_sync_info sync, unsigned semantics)
+{
+ wait_imm imm;
+ sync_scope subgroup_scope = ctx.program->workgroup_size <= ctx.program->wave_size ? scope_workgroup : scope_subgroup;
+ if ((sync.semantics & semantics) && sync.scope > subgroup_scope) {
+ unsigned storage = sync.storage;
+ while (storage) {
+ unsigned idx = u_bit_scan(&storage);
+
+ /* LDS is private to the workgroup */
+ sync_scope bar_scope_lds = MIN2(sync.scope, scope_workgroup);
+
+ uint16_t events = ctx.barrier_events[idx];
+ if (bar_scope_lds <= subgroup_scope)
+ events &= ~event_lds;
+
+ /* in non-WGP, the L1/L0 cache keeps all memory operations in-order for the same workgroup */
+ if (ctx.chip_class < GFX10 && sync.scope <= scope_workgroup)
+ events &= ~(event_vmem | event_vmem_store | event_smem);
+
+ if (events)
+ imm.combine(ctx.barrier_imm[idx]);
+ }
+ }
+
+ return imm;
+}
+
+wait_imm kill(Instruction* instr, wait_ctx& ctx, memory_sync_info sync_info)
{
wait_imm imm;
if (ctx.exp_cnt || ctx.vm_cnt || ctx.lgkm_cnt)
imm.combine(check_instr(instr, ctx));
- if (instr->format == Format::PSEUDO_BARRIER) {
- unsigned* bsize = ctx.program->info->cs.block_size;
- unsigned workgroup_size = bsize[0] * bsize[1] * bsize[2];
- switch (instr->opcode) {
- case aco_opcode::p_memory_barrier_all:
- for (unsigned i = 0; i < barrier_count; i++) {
- if ((1 << i) == barrier_shared && workgroup_size <= 64)
- continue;
- imm.combine(ctx.barrier_imm[i]);
- }
- break;
- case aco_opcode::p_memory_barrier_atomic:
- imm.combine(ctx.barrier_imm[ffs(barrier_atomic) - 1]);
- break;
- /* see comment in aco_scheduler.cpp's can_move_instr() on why these barriers are merged */
- case aco_opcode::p_memory_barrier_buffer:
- case aco_opcode::p_memory_barrier_image:
- imm.combine(ctx.barrier_imm[ffs(barrier_buffer) - 1]);
- imm.combine(ctx.barrier_imm[ffs(barrier_image) - 1]);
- break;
- case aco_opcode::p_memory_barrier_shared:
- if (workgroup_size > 64)
- imm.combine(ctx.barrier_imm[ffs(barrier_shared) - 1]);
- break;
- default:
- assert(false);
- break;
+ imm.combine(parse_wait_instr(ctx, instr));
+
+ /* It's required to wait for scalar stores before "writing back" data.
+ * It shouldn't cost anything anyways since we're about to do s_endpgm.
+ */
+ if (ctx.lgkm_cnt && instr->opcode == aco_opcode::s_dcache_wb) {
+ assert(ctx.chip_class >= GFX8);
+ imm.lgkm = 0;
+ }
+
+ if (ctx.chip_class >= GFX10 && instr->format == Format::SMEM) {
+ /* GFX10: A store followed by a load at the same address causes a problem because
+ * the load doesn't load the correct values unless we wait for the store first.
+ * This is NOT mitigated by an s_nop.
+ *
+ * TODO: Refine this when we have proper alias analysis.
+ */
+ SMEM_instruction *smem = static_cast<SMEM_instruction *>(instr);
+ if (ctx.pending_s_buffer_store &&
+ !smem->definitions.empty() &&
+ !smem->sync.can_reorder()) {
+ imm.lgkm = 0;
}
}
+ if (instr->opcode == aco_opcode::p_barrier)
+ imm.combine(perform_barrier(ctx, static_cast<Pseudo_barrier_instruction *>(instr)->sync, semantic_acqrel));
+ else
+ imm.combine(perform_barrier(ctx, sync_info, semantic_release));
+
if (!imm.empty()) {
if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
imm.vm = 0;
ctx.vs_cnt = std::min(ctx.vs_cnt, imm.vs);
/* update barrier wait imms */
- for (unsigned i = 0; i < barrier_count; i++) {
+ for (unsigned i = 0; i < storage_count; i++) {
wait_imm& bar = ctx.barrier_imm[i];
- if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp)
+ uint16_t& bar_ev = ctx.barrier_events[i];
+ if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp) {
bar.exp = wait_imm::unset_counter;
- if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm)
+ bar_ev &= ~exp_events;
+ }
+ if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm) {
bar.vm = wait_imm::unset_counter;
- if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm)
+ bar_ev &= ~(vm_events & ~event_flat);
+ }
+ if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm) {
bar.lgkm = wait_imm::unset_counter;
- if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs)
+ bar_ev &= ~(lgkm_events & ~event_flat);
+ }
+ if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs) {
bar.vs = wait_imm::unset_counter;
+ bar_ev &= ~vs_events;
+ }
+ if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
+ bar_ev &= ~event_flat;
}
- /* remove all vgprs with higher counter from map */
+ /* remove all gprs with higher counter from map */
std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.begin();
while (it != ctx.gpr_map.end())
{
if (imm.exp != wait_imm::unset_counter && imm.exp <= it->second.imm.exp)
- it->second.remove_counter(counter_exp);
+ ctx.wait_and_remove_from_entry(it->first, it->second, counter_exp);
if (imm.vm != wait_imm::unset_counter && imm.vm <= it->second.imm.vm)
- it->second.remove_counter(counter_vm);
+ ctx.wait_and_remove_from_entry(it->first, it->second, counter_vm);
if (imm.lgkm != wait_imm::unset_counter && imm.lgkm <= it->second.imm.lgkm)
- it->second.remove_counter(counter_lgkm);
- if (imm.lgkm != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
- it->second.remove_counter(counter_vs);
+ ctx.wait_and_remove_from_entry(it->first, it->second, counter_lgkm);
+ if (imm.vs != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
+ ctx.wait_and_remove_from_entry(it->first, it->second, counter_vs);
if (!it->second.counters)
it = ctx.gpr_map.erase(it);
else
if (imm.vm == 0)
ctx.pending_flat_vm = false;
- if (imm.lgkm == 0)
+ if (imm.lgkm == 0) {
ctx.pending_flat_lgkm = false;
+ ctx.pending_s_buffer_store = false;
+ }
return imm;
}
-void update_barrier_imm(wait_ctx& ctx, uint8_t counters, barrier_interaction barrier)
+void update_barrier_counter(uint8_t *ctr, unsigned max)
+{
+ if (*ctr != wait_imm::unset_counter && *ctr < max)
+ (*ctr)++;
+}
+
+void update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, memory_sync_info sync)
{
- unsigned barrier_index = ffs(barrier) - 1;
- for (unsigned i = 0; i < barrier_count; i++) {
+ for (unsigned i = 0; i < storage_count; i++) {
wait_imm& bar = ctx.barrier_imm[i];
- if (i == barrier_index) {
+ uint16_t& bar_ev = ctx.barrier_events[i];
+ if (sync.storage & (1 << i) && !(sync.semantics & semantic_private)) {
+ bar_ev |= event;
if (counters & counter_lgkm)
bar.lgkm = 0;
if (counters & counter_vm)
bar.vm = 0;
if (counters & counter_exp)
bar.exp = 0;
if (counters & counter_vs)
bar.vs = 0;
- } else {
- if (counters & counter_lgkm && bar.lgkm != wait_imm::unset_counter && bar.lgkm < ctx.max_lgkm_cnt)
- bar.lgkm++;
- if (counters & counter_vm && bar.vm != wait_imm::unset_counter && bar.vm < ctx.max_vm_cnt)
- bar.vm++;
- if (counters & counter_exp && bar.exp != wait_imm::unset_counter && bar.exp < ctx.max_exp_cnt)
- bar.exp++;
- if (counters & counter_vs && bar.vs != wait_imm::unset_counter && bar.vs < ctx.max_vs_cnt)
- bar.vs++;
+ } else if (!(bar_ev & ctx.unordered_events) && !(ctx.unordered_events & event)) {
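+ /* this storage class is unaffected: if all pending events of a counter are
+ * ordered and of the same kind as this one, the pending ops get one more op
+ * queued behind them, so the counter value needed to wait for them grows */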
+ if (counters & counter_lgkm && (bar_ev & lgkm_events) == event)
+ update_barrier_counter(&bar.lgkm, ctx.max_lgkm_cnt);
+ if (counters & counter_vm && (bar_ev & vm_events) == event)
+ update_barrier_counter(&bar.vm, ctx.max_vm_cnt);
+ if (counters & counter_exp && (bar_ev & exp_events) == event)
+ update_barrier_counter(&bar.exp, ctx.max_exp_cnt);
+ if (counters & counter_vs && (bar_ev & vs_events) == event)
+ update_barrier_counter(&bar.vs, ctx.max_vs_cnt);
}
}
}
-void update_counters(wait_ctx& ctx, wait_event event, barrier_interaction barrier=barrier_none)
+void update_counters(wait_ctx& ctx, wait_event event, memory_sync_info sync=memory_sync_info())
{
uint8_t counters = get_counters_for_event(event);
if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
ctx.vs_cnt++;
- update_barrier_imm(ctx, counters, barrier);
+ update_barrier_imm(ctx, counters, event, sync);
if (ctx.unordered_events & event)
return;
}
}
-void update_counters_for_flat_load(wait_ctx& ctx, barrier_interaction barrier=barrier_none)
+void update_counters_for_flat_load(wait_ctx& ctx, memory_sync_info sync=memory_sync_info())
{
assert(ctx.chip_class < GFX10);
if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
ctx.lgkm_cnt++;
- if (ctx.lgkm_cnt <= ctx.max_vm_cnt)
- ctx.vm_cnt++;
+ if (ctx.vm_cnt <= ctx.max_vm_cnt)
+ ctx.vm_cnt++;
- update_barrier_imm(ctx, counter_vm | counter_lgkm, barrier);
+ update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, sync);
for (std::pair<PhysReg,wait_entry> e : ctx.gpr_map)
{
ctx.pending_flat_vm = true;
}
-void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read)
+void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read,
+ bool has_sampler=false)
{
uint16_t counters = get_counters_for_event(event);
wait_imm imm;
imm.vs = 0;
wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);
+ new_entry.has_vmem_nosampler = (event & event_vmem) && !has_sampler;
+ new_entry.has_vmem_sampler = (event & event_vmem) && has_sampler;
for (unsigned i = 0; i < rc.size(); i++) {
- auto it = ctx.gpr_map.emplace(PhysReg{reg.reg+i}, new_entry);
+ auto it = ctx.gpr_map.emplace(PhysReg{reg.reg()+i}, new_entry);
if (!it.second)
it.first->second.join(new_entry);
}
+
+ if (ctx.collect_statistics) {
+ unsigned counters_todo = counters;
+ while (counters_todo) {
+ unsigned i = u_bit_scan(&counters_todo);
+ ctx.unwaited_instrs[i].insert(std::make_pair(ctx.gen_instr, 0u));
+ for (unsigned j = 0; j < rc.size(); j++)
+ ctx.reg_instrs[i][PhysReg{reg.reg()+j}].insert(ctx.gen_instr);
+ }
+ }
}
-void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event)
+void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event, bool has_sampler=false)
{
if (!op.isConstant() && !op.isUndefined())
- insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false);
+ insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false, has_sampler);
}
-void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event)
+void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event, bool has_sampler=false)
{
- insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true);
+ insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true, has_sampler);
}
void gen(Instruction* instr, wait_ctx& ctx)
break;
}
case Format::FLAT: {
+ FLAT_instruction *flat = static_cast<FLAT_instruction*>(instr);
if (ctx.chip_class < GFX10 && !instr->definitions.empty())
- update_counters_for_flat_load(ctx, barrier_buffer);
+ update_counters_for_flat_load(ctx, flat->sync);
else
- update_counters(ctx, event_flat, barrier_buffer);
+ update_counters(ctx, event_flat, flat->sync);
if (!instr->definitions.empty())
insert_wait_entry(ctx, instr->definitions[0], event_flat);
break;
}
case Format::SMEM: {
- update_counters(ctx, event_smem, static_cast<SMEM_instruction*>(instr)->barrier);
+ SMEM_instruction *smem = static_cast<SMEM_instruction*>(instr);
+ update_counters(ctx, event_smem, smem->sync);
if (!instr->definitions.empty())
insert_wait_entry(ctx, instr->definitions[0], event_smem);
+ else if (ctx.chip_class >= GFX10 &&
+ !smem->sync.can_reorder())
+ ctx.pending_s_buffer_store = true;
+
break;
}
case Format::DS: {
- bool gds = static_cast<DS_instruction*>(instr)->gds;
- update_counters(ctx, gds ? event_gds : event_lds, gds ? barrier_none : barrier_shared);
- if (gds)
+ DS_instruction *ds = static_cast<DS_instruction*>(instr);
+ update_counters(ctx, ds->gds ? event_gds : event_lds, ds->sync);
+ if (ds->gds)
update_counters(ctx, event_gds_gpr_lock);
if (!instr->definitions.empty())
- insert_wait_entry(ctx, instr->definitions[0], gds ? event_gds : event_lds);
+ insert_wait_entry(ctx, instr->definitions[0], ds->gds ? event_gds : event_lds);
- if (gds) {
+ if (ds->gds) {
for (const Operand& op : instr->operands)
insert_wait_entry(ctx, op, event_gds_gpr_lock);
insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
case Format::MIMG:
case Format::GLOBAL: {
wait_event ev = !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
- update_counters(ctx, ev, get_barrier_interaction(instr));
+ update_counters(ctx, ev, get_sync_info(instr));
+
+ bool has_sampler = instr->format == Format::MIMG && !instr->operands[1].isUndefined() && instr->operands[1].regClass() == s4;
if (!instr->definitions.empty())
- insert_wait_entry(ctx, instr->definitions[0], ev);
+ insert_wait_entry(ctx, instr->definitions[0], ev, has_sampler);
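+
+ /* GFX6: the gpr operands of VMEM stores and MIMG instructions stay locked
+ * until the hardware has read them; this is tracked with expcnt
+ * (event_vmem_gpr_lock) */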
- if (instr->operands.size() == 4 && ctx.chip_class == GFX6) {
+ if (ctx.chip_class == GFX6 &&
+ instr->format != Format::MIMG &&
+ instr->operands.size() == 4) {
ctx.exp_cnt++;
update_counters(ctx, event_vmem_gpr_lock);
insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
+ } else if (ctx.chip_class == GFX6 &&
+ instr->format == Format::MIMG &&
+ instr->operands[1].regClass().type() == RegType::vgpr) {
+ ctx.exp_cnt++;
+ update_counters(ctx, event_vmem_gpr_lock);
+ insert_wait_entry(ctx, instr->operands[1], event_vmem_gpr_lock);
}
+
break;
}
+ case Format::SOPP: {
+ if (instr->opcode == aco_opcode::s_sendmsg ||
+ instr->opcode == aco_opcode::s_sendmsghalt)
+ update_counters(ctx, event_sendmsg);
+ break;
+ }
default:
break;
}
{
if (imm.vs != wait_imm::unset_counter) {
assert(ctx.chip_class >= GFX10);
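+ /* the SOPK encoding of s_waitcnt_vscnt requires an sdst; sgpr_null is
+ * used since only the immediate matters here */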
- SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 0);
+ SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
+ waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
waitcnt_vs->imm = imm.vs;
instructions.emplace_back(waitcnt_vs);
imm.vs = wait_imm::unset_counter;
{
std::vector<aco_ptr<Instruction>> new_instructions;
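+ /* waits are accumulated in queued_imm and only emitted before the next
+ * kept instruction, so consecutive s_waitcnt collapse into one */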
+ wait_imm queued_imm;
+
for (aco_ptr<Instruction>& instr : block.instructions) {
- wait_imm imm = kill(instr.get(), ctx);
+ bool is_wait = !parse_wait_instr(ctx, instr.get()).empty();
- if (!imm.empty())
- emit_waitcnt(ctx, new_instructions, imm);
+ memory_sync_info sync_info = get_sync_info(instr.get());
+ queued_imm.combine(kill(instr.get(), ctx, sync_info));
+ ctx.gen_instr = instr.get();
gen(instr.get(), ctx);
- if (instr->format != Format::PSEUDO_BARRIER)
+ if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
+ if (!queued_imm.empty()) {
+ emit_waitcnt(ctx, new_instructions, queued_imm);
+ queued_imm = wait_imm();
+ }
new_instructions.emplace_back(std::move(instr));
+
+ queued_imm.combine(perform_barrier(ctx, sync_info, semantic_acquire));
+
+ if (ctx.collect_statistics)
+ ctx.advance_unwaited_instrs();
+ }
}
- /* check if this block is at the end of a loop */
- for (unsigned succ_idx : block.linear_succs) {
- /* eliminate any remaining counters */
- if (succ_idx <= block.index && (ctx.vm_cnt || ctx.exp_cnt || ctx.lgkm_cnt || ctx.vs_cnt)) {
- // TODO: we could do better if we only wait if the regs between the block and other predecessors differ
+ if (!queued_imm.empty())
+ emit_waitcnt(ctx, new_instructions, queued_imm);
- aco_ptr<Instruction> branch = std::move(new_instructions.back());
- new_instructions.pop_back();
+ block.instructions.swap(new_instructions);
+}
- wait_imm imm(ctx.vm_cnt ? 0 : wait_imm::unset_counter,
- ctx.exp_cnt ? 0 : wait_imm::unset_counter,
- ctx.lgkm_cnt ? 0 : wait_imm::unset_counter,
- ctx.vs_cnt ? 0 : wait_imm::unset_counter);
- emit_waitcnt(ctx, new_instructions, imm);
+} /* end namespace */
- new_instructions.push_back(std::move(branch));
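+/* condenses the collected wait distances for the given events into a single
+ * score; larger distances mean waits happen further from the instructions
+ * they wait on and hide more latency */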
+static uint32_t calculate_score(std::vector<wait_ctx> &ctx_vec, uint32_t event_mask)
+{
+ double result = 0.0;
+ unsigned num_waits = 0;
+ while (event_mask) {
+ unsigned event_index = u_bit_scan(&event_mask);
+ for (const wait_ctx &ctx : ctx_vec) {
+ for (unsigned dist : ctx.wait_distances[event_index]) {
+ double score = dist;
+ /* for many events, excessive distances provide little benefit, so
+ * decrease the score in that case. */
+ double threshold = INFINITY;
+ double inv_strength = 0.000001;
+ switch (1 << event_index) {
+ case event_smem:
+ threshold = 70.0;
+ inv_strength = 75.0;
+ break;
+ case event_vmem:
+ case event_vmem_store:
+ case event_flat:
+ threshold = 230.0;
+ inv_strength = 150.0;
+ break;
+ case event_lds:
+ threshold = 16.0;
+ break;
+ default:
+ break;
+ }
+ if (score > threshold) {
+ score -= threshold;
+ score = threshold + score / (1.0 + score / inv_strength);
+ }
- ctx = wait_ctx(program);
- break;
+ /* we don't want increases in high scores to hide decreases in low scores,
+ * so raise to the power of 0.1 before averaging. */
+ result += pow(score, 0.1);
+ num_waits++;
+ }
}
}
- block.instructions.swap(new_instructions);
+ /* avoid NaN if no waits were recorded */
+ return num_waits ? round(pow(result / num_waits, 10.0) * 10.0) : 0;
}
-} /* end namespace */
-
void insert_wait_states(Program* program)
{
- wait_ctx out_ctx[program->blocks.size()]; /* per BB ctx */
- for (unsigned i = 0; i < program->blocks.size(); i++)
- out_ctx[i] = wait_ctx(program);
-
- for (unsigned i = 0; i < program->blocks.size(); i++) {
- Block& current = program->blocks[i];
- wait_ctx& in = out_ctx[current.index];
+ /* per BB ctx */
+ std::vector<bool> done(program->blocks.size());
+ std::vector<wait_ctx> in_ctx(program->blocks.size(), wait_ctx(program));
+ std::vector<wait_ctx> out_ctx(program->blocks.size(), wait_ctx(program));
+
+ std::stack<unsigned> loop_header_indices;
+ unsigned loop_progress = 0;
+
+ for (unsigned i = 0; i < program->blocks.size();) {
+ Block& current = program->blocks[i++];
+ wait_ctx ctx = in_ctx[current.index];
+
+ if (current.kind & block_kind_loop_header) {
+ loop_header_indices.push(current.index);
+ } else if (current.kind & block_kind_loop_exit) {
+ bool repeat = false;
+ if (loop_progress == loop_header_indices.size()) {
+ i = loop_header_indices.top();
+ repeat = true;
+ }
+ loop_header_indices.pop();
+ loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
+ if (repeat)
+ continue;
+ }
+ bool changed = false;
for (unsigned b : current.linear_preds)
- in.join(&out_ctx[b], false);
+ changed |= ctx.join(&out_ctx[b], false);
for (unsigned b : current.logical_preds)
- in.join(&out_ctx[b], true);
+ changed |= ctx.join(&out_ctx[b], true);
+
+ if (done[current.index] && !changed) {
+ in_ctx[current.index] = std::move(ctx);
+ continue;
+ } else {
+ in_ctx[current.index] = ctx;
+ }
- if (current.instructions.empty())
+ if (current.instructions.empty()) {
+ out_ctx[current.index] = std::move(ctx);
continue;
+ }
+
+ loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
+ done[current.index] = true;
+
+ handle_block(program, current, ctx);
+
+ out_ctx[current.index] = std::move(ctx);
+ }
- handle_block(program, current, in);
+ if (program->collect_statistics) {
+ program->statistics[statistic_vmem_score] =
+ calculate_score(out_ctx, event_vmem | event_flat | event_vmem_store);
+ program->statistics[statistic_smem_score] =
+ calculate_score(out_ctx, event_smem);
}
}