#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
+#include <algorithm>
#include <stack>
#include <limits>
+#if __cplusplus >= 201103L
+#include <unordered_map>
+#else
+#include <tr1/unordered_map>
+#endif
namespace nv50_ir {
+#if __cplusplus >= 201103L
+using std::hash;
+using std::unordered_map;
+#elif !defined(ANDROID)
+using std::tr1::hash;
+using std::tr1::unordered_map;
+#else
+#error Android release before Lollipop is not supported!
+#endif
+
#define MAX_REGISTER_FILE_SIZE 256
class RegisterSet
void print() const;
+ const bool restrictedGPR16Range;
+
private:
BitSet bits[LAST_REGISTER_FILE + 1];
int last[LAST_REGISTER_FILE + 1];
int fill[LAST_REGISTER_FILE + 1];
-
- const bool restrictedGPR16Range;
};
void
private:
virtual bool visit(BasicBlock *);
inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
+ inline void splitEdges(BasicBlock *b);
};
class ArgumentMovesPass : public Pass {
void texConstraintNV50(TexInstruction *);
void texConstraintNVC0(TexInstruction *);
void texConstraintNVE0(TexInstruction *);
+ void texConstraintGM107(TexInstruction *);
std::list<Instruction *> constrList;
bool run(const std::list<ValuePair>&);
Symbol *assignSlot(const Interval&, const unsigned int size);
+ Value *offsetSlot(Value *, const LValue *);
inline int32_t getStackSize() const { return stackSize; }
private:
return (n == 2);
}
-// For each operand of each PHI in b, generate a new value by inserting a MOV
-// at the end of the block it is coming from and replace the operand with its
-// result. This eliminates liveness conflicts and enables us to let values be
-// copied to the right register if such a conflict exists nonetheless.
+struct PhiMapHash {
+ size_t operator()(const std::pair<Instruction *, BasicBlock *>& val) const {
+ return hash<Instruction*>()(val.first) * 31 +
+ hash<BasicBlock*>()(val.second);
+ }
+};
+
+typedef unordered_map<
+ std::pair<Instruction *, BasicBlock *>, Value *, PhiMapHash> PhiMap;
+
+// Critical edges need to be split up so that work can be inserted along
+// specific edge transitions. Unfortunately manipulating incident edges into a
+// BB invalidates all the PHI nodes since their sources are implicitly ordered
+// by incident edge order.
//
-// These MOVs are also crucial in making sure the live intervals of phi srces
-// are extended until the end of the loop, since they are not included in the
-// live-in sets.
-bool
-RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
+// TODO: Make it so that that is not the case, and PHI nodes store pointers to
+// the original BBs.
+void
+RegAlloc::PhiMovesPass::splitEdges(BasicBlock *bb)
{
- Instruction *phi, *mov;
BasicBlock *pb, *pn;
-
+ Instruction *phi;
+ Graph::EdgeIterator ei;
std::stack<BasicBlock *> stack;
+ int j = 0;
- for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
+ for (ei = bb->cfg.incident(); !ei.end(); ei.next()) {
pb = BasicBlock::get(ei.getNode());
assert(pb);
if (needNewElseBlock(bb, pb))
stack.push(pb);
}
+
+   // No critical edges were found; no need to perform any work.
+ if (stack.empty())
+ return;
+
+ // We're about to, potentially, reorder the inbound edges. This means that
+ // we need to hold on to the (phi, bb) -> src mapping, and fix up the phi
+ // nodes after the graph has been modified.
+ PhiMap phis;
+
+ j = 0;
+ for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
+ pb = BasicBlock::get(ei.getNode());
+ for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next)
+ phis.insert(std::make_pair(std::make_pair(phi, pb), phi->getSrc(j)));
+ }
+
while (!stack.empty()) {
pb = stack.top();
pn = new BasicBlock(func);
assert(pb->getExit()->op != OP_CALL);
if (pb->getExit()->asFlow()->target.bb == bb)
pb->getExit()->asFlow()->target.bb = pn;
+
+ for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
+ PhiMap::iterator it = phis.find(std::make_pair(phi, pb));
+ assert(it != phis.end());
+ phis.insert(std::make_pair(std::make_pair(phi, pn), it->second));
+ phis.erase(it);
+ }
}
+ // Now go through and fix up all of the phi node sources.
+ j = 0;
+ for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
+ pb = BasicBlock::get(ei.getNode());
+ for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
+ PhiMap::const_iterator it = phis.find(std::make_pair(phi, pb));
+ assert(it != phis.end());
+
+ phi->setSrc(j, it->second);
+ }
+ }
+}
+
+// For each operand of each PHI in b, generate a new value by inserting a MOV
+// at the end of the block it is coming from and replace the operand with its
+// result. This eliminates liveness conflicts and enables us to let values be
+// copied to the right register if such a conflict exists nonetheless.
+//
+// These MOVs are also crucial in making sure the live intervals of phi sources
+// are extended until the end of the loop, since they are not included in the
+// live-in sets.
+bool
+RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
+{
+ Instruction *phi, *mov;
+
+ splitEdges(bb);
+
// insert MOVs (phi->src(j) should stem from j-th in-BB)
int j = 0;
for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
- pb = BasicBlock::get(ei.getNode());
+ BasicBlock *pb = BasicBlock::get(ei.getNode());
if (!pb->isTerminated())
pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));
for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
- mov = new_Instruction(func, OP_MOV, TYPE_U32);
+ LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
+ mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
mov->setSrc(0, phi->getSrc(j));
- mov->setDef(0, new_LValue(func, phi->getDef(0)->asLValue()));
- phi->setSrc(j, mov->getDef(0));
+ mov->setDef(0, tmp);
+ phi->setSrc(j, tmp);
pb->insertBefore(pb->getExit(), mov);
}
}
}
+static bool
+isShortRegOp(Instruction *insn)
+{
+ // Immediates are always in src1. Every other situation can be resolved by
+ // using a long encoding.
+ return insn->srcExists(1) && insn->src(1).getFile() == FILE_IMMEDIATE;
+}
+
+// Check if this LValue is ever used in an instruction that can't be encoded
+// with long registers (i.e. > r63)
+static bool
+isShortRegVal(LValue *lval)
+{
+ if (lval->getInsn() == NULL)
+ return false;
+ for (Value::DefCIterator def = lval->defs.begin();
+ def != lval->defs.end(); ++def)
+ if (isShortRegOp((*def)->getInsn()))
+ return true;
+ for (Value::UseCIterator use = lval->uses.begin();
+ use != lval->uses.end(); ++use)
+ if (isShortRegOp((*use)->getInsn()))
+ return true;
+ return false;
+}
+
void
GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
{
weight = std::numeric_limits<float>::infinity();
degree = 0;
- degreeLimit = regs.getFileSize(f, lval->reg.size);
+ int size = regs.getFileSize(f, lval->reg.size);
+ // On nv50, we lose a bit of gpr encoding when there's an embedded
+ // immediate.
+ if (regs.restrictedGPR16Range && f == FILE_GPR && isShortRegVal(lval))
+ size /= 2;
+ degreeLimit = size;
+ degreeLimit -= relDegree[1][colors] - 1;
livei.insert(lval->livei);
}
case 0xe0:
case 0xf0:
case 0x100:
+ case 0x110:
+ case 0x120:
ret = doCoalesce(insns, JOIN_MASK_UNION);
break;
default:
break;
i = NULL;
if (!insn->getDef(0)->uses.empty())
- i = insn->getDef(0)->uses.front()->getInsn();
+ i = (*insn->getDef(0)->uses.begin())->getInsn();
// if this is a contraint-move there will only be a single use
if (i && i->op == OP_MERGE) // do we really still need this ?
break;
case OP_TXQ:
case OP_TXD:
case OP_TXG:
+ case OP_TXLQ:
case OP_TEXCSAA:
+ case OP_TEXPREP:
if (!(mask & JOIN_MASK_TEX))
break;
for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
if (lval) {
nodes[i].init(regs, lval);
RIG.insert(&nodes[i]);
+
+ if (lval->inFile(FILE_GPR) && lval->getInsn() != NULL &&
+ prog->getTarget()->getChipset() < 0xc0) {
+ Instruction *insn = lval->getInsn();
+ if (insn->op == OP_MAD || insn->op == OP_SAD)
+ // Short encoding only possible if they're all GPRs, no need to
+ // affect them otherwise.
+ if (insn->flagsDef < 0 &&
+ insn->src(0).getFile() == FILE_GPR &&
+ insn->src(1).getFile() == FILE_GPR &&
+ insn->src(2).getFile() == FILE_GPR)
+ nodes[i].addRegPreference(getNode(insn->getSrc(2)->asLValue()));
+ }
}
}
delete[] nodes;
nodes = NULL;
+ hi.next = hi.prev = &hi;
+ lo[0].next = lo[0].prev = &lo[0];
+ lo[1].next = lo[1].prev = &lo[1];
}
Symbol *
return slot.sym;
}
+Value *
+SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
+{
+ if (!lval->compound || (lval->compMask & 0x1))
+ return base;
+ Value *slot = cloneShallow(func, base);
+
+ slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
+ slot->reg.size = lval->reg.size;
+
+ return slot;
+}
+
void
SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
{
- const DataType ty = typeOfSize(slot->reg.size);
+ const DataType ty = typeOfSize(lval->reg.size);
+
+ slot = offsetSlot(slot, lval);
Instruction *st;
if (slot->reg.file == FILE_MEMORY_LOCAL) {
- st = new_Instruction(func, OP_STORE, ty);
- st->setSrc(0, slot);
- st->setSrc(1, lval);
lval->noSpill = 1;
+ if (ty != TYPE_B96) {
+ st = new_Instruction(func, OP_STORE, ty);
+ st->setSrc(0, slot);
+ st->setSrc(1, lval);
+ } else {
+ st = new_Instruction(func, OP_SPLIT, ty);
+ st->setSrc(0, lval);
+ for (int d = 0; d < lval->reg.size / 4; ++d)
+ st->setDef(d, new_LValue(func, FILE_GPR));
+
+ for (int d = lval->reg.size / 4 - 1; d >= 0; --d) {
+ Value *tmp = cloneShallow(func, slot);
+ tmp->reg.size = 4;
+ tmp->reg.data.offset += 4 * d;
+
+ Instruction *s = new_Instruction(func, OP_STORE, TYPE_U32);
+ s->setSrc(0, tmp);
+ s->setSrc(1, st->getDef(d));
+ defi->bb->insertAfter(defi, s);
+ }
+ }
} else {
st = new_Instruction(func, OP_CVT, ty);
st->setDef(0, slot);
st->setSrc(0, lval);
+ if (lval->reg.file == FILE_FLAGS)
+ st->flagsSrc = 0;
}
defi->bb->insertAfter(defi, st);
}
LValue *
SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
{
- const DataType ty = typeOfSize(slot->reg.size);
+ const DataType ty = typeOfSize(lval->reg.size);
+ slot = offsetSlot(slot, lval);
lval = cloneShallow(func, lval);
Instruction *ld;
if (slot->reg.file == FILE_MEMORY_LOCAL) {
lval->noSpill = 1;
- ld = new_Instruction(func, OP_LOAD, ty);
+ if (ty != TYPE_B96) {
+ ld = new_Instruction(func, OP_LOAD, ty);
+ } else {
+ ld = new_Instruction(func, OP_MERGE, ty);
+ for (int d = 0; d < lval->reg.size / 4; ++d) {
+ Value *tmp = cloneShallow(func, slot);
+ LValue *val;
+ tmp->reg.size = 4;
+ tmp->reg.data.offset += 4 * d;
+
+ Instruction *l = new_Instruction(func, OP_LOAD, TYPE_U32);
+ l->setDef(0, (val = new_LValue(func, FILE_GPR)));
+ l->setSrc(0, tmp);
+ usei->bb->insertBefore(usei, l);
+ ld->setSrc(d, val);
+ val->noSpill = 1;
+ }
+ ld->setDef(0, lval);
+ usei->bb->insertBefore(usei, ld);
+ return lval;
+ }
} else {
ld = new_Instruction(func, OP_CVT, ty);
}
ld->setDef(0, lval);
ld->setSrc(0, slot);
+ if (lval->reg.file == FILE_FLAGS)
+ ld->flagsDef = 0;
usei->bb->insertBefore(usei, ld);
return lval;
}
+static bool
+value_cmp(ValueRef *a, ValueRef *b) {
+ Instruction *ai = a->getInsn(), *bi = b->getInsn();
+ if (ai->bb != bi->bb)
+ return ai->bb->getId() < bi->bb->getId();
+ return ai->serial < bi->serial;
+}
+
+// For each value that is to be spilled, go through all its definitions.
+// A value can have multiple definitions if it has been coalesced before.
+// For each definition, first go through all its uses and insert an unspill
+// instruction before it, then replace the use with the temporary register.
+// Unspill can be either a load from memory or simply a move to another
+// register file.
+// For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
+// if we have spilled to a memory location, or simply with the new register.
+// No load or conversion instruction should be needed.
bool
SpillCodeInserter::run(const std::list<ValuePair>& lst)
{
LValue *lval = it->first->asLValue();
Symbol *mem = it->second ? it->second->asSym() : NULL;
+ // Keep track of which instructions to delete later. Deleting them
+ // inside the loop is unsafe since a single instruction may have
+ // multiple destinations that all need to be spilled (like OP_SPLIT).
+ unordered_set<Instruction *> to_del;
+
for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
++d) {
Value *slot = mem ?
LValue *dval = (*d)->get()->asLValue();
Instruction *defi = (*d)->getInsn();
- // handle uses first or they'll contain the spill stores
- while (!dval->uses.empty()) {
- ValueRef *u = dval->uses.front();
+ // Sort all the uses by BB/instruction so that we don't unspill
+ // multiple times in a row, and also remove a source of
+ // non-determinism.
+ std::vector<ValueRef *> refs(dval->uses.begin(), dval->uses.end());
+ std::sort(refs.begin(), refs.end(), value_cmp);
+
+      // Unspill at each use *before* inserting spill instructions;
+ // we don't want to have the spill instructions in the use list here.
+ for (std::vector<ValueRef*>::const_iterator it = refs.begin();
+ it != refs.end(); ++it) {
+ ValueRef *u = *it;
Instruction *usei = u->getInsn();
assert(usei);
- if (usei->op == OP_PHI) {
+ if (usei->isPseudo()) {
tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
last = NULL;
- } else
- if (!last || usei != last->next) { // TODO: sort uses
- tmp = unspill(usei, dval, slot);
+ } else {
+ if (!last || (usei != last->next && usei != last))
+ tmp = unspill(usei, dval, slot);
last = usei;
}
u->set(tmp);
}
assert(defi);
- if (defi->op == OP_PHI) {
+ if (defi->isPseudo()) {
d = lval->defs.erase(d);
--d;
if (slot->reg.file == FILE_MEMORY_LOCAL)
- delete_Instruction(func->getProgram(), defi);
+ to_del.insert(defi);
else
defi->setDef(0, slot);
} else {
}
}
+ for (unordered_set<Instruction *>::const_iterator it = to_del.begin();
+ it != to_del.end(); ++it)
+ delete_Instruction(func->getProgram(), *it);
}
// TODO: We're not trying to reuse old slots in a potential next iteration.
ret && i <= func->loopNestingBound;
sequence = func->cfg.nextSequence(), ++i)
ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
+ // reset marker
+ for (ArrayList::Iterator bi = func->allBBlocks.iterator();
+ !bi.end(); bi.next())
+ BasicBlock::get(bi)->liveSet.marker = false;
if (!ret)
break;
func->orderInstructions(this->insns);
Value *v = merge->getSrc(s);
v->reg.data.id = regs.bytesToId(v, reg);
v->join = v;
+ // If the value is defined by a phi/union node, we also need to
+ // perform the same fixup on that node's sources, since after RA
+ // their registers should be identical.
+ if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
+ Instruction *phi = v->getInsn();
+ for (int phis = 0; phi->srcExists(phis); ++phis)
+ phi->getSrc(phis)->join = v;
+ }
reg += v->reg.size;
}
}
constrList.push_back(merge);
}
+void
+RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
+{
+ int n, s;
+
+ if (isTextureOp(tex->op))
+ textureMask(tex);
+ condenseDefs(tex);
+
+ if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
+ condenseSrcs(tex, 3, (3 + typeSizeof(tex->dType) / 4) - 1);
+ } else
+ if (isTextureOp(tex->op)) {
+ if (tex->op != OP_TXQ) {
+ s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
+ if (tex->op == OP_TXD) {
+ // Indirect handle belongs in the first arg
+ if (tex->tex.rIndirectSrc >= 0)
+ s++;
+ if (!tex->tex.target.isArray() && tex->tex.useOffsets)
+ s++;
+ }
+ n = tex->srcCount(0xff) - s;
+ } else {
+ s = tex->srcCount(0xff);
+ n = 0;
+ }
+
+ if (s > 1)
+ condenseSrcs(tex, 0, s - 1);
+ if (n > 1) // NOTE: first call modified positions already
+ condenseSrcs(tex, 1, n);
+ }
+}
+
void
RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
{
{
int n, s;
- textureMask(tex);
+ if (isTextureOp(tex->op))
+ textureMask(tex);
if (tex->op == OP_TXQ) {
s = tex->srcCount(0xff);
n = 0;
} else {
- s = tex->tex.target.getArgCount();
+ s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
if (!tex->tex.target.isArray() &&
(tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
++s;
case 0x100:
texConstraintNVE0(tex);
break;
+ case 0x110:
+ case 0x120:
+ texConstraintGM107(tex);
+ break;
default:
break;
}
condenseDefs(i);
if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
addHazard(i, i->src(0).getIndirect(0));
+ if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
+ addHazard(i, i->src(0).getIndirect(1));
} else
if (i->op == OP_UNION ||
i->op == OP_MERGE ||