nv50/ir: use C++11 standard std::unordered_map if possible
src/gallium/drivers/nouveau/codegen/nv50_ir_ra.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "codegen/nv50_ir.h"
24 #include "codegen/nv50_ir_target.h"
25
26 #include <stack>
27 #include <limits>
28 #if __cplusplus >= 201103L
29 #include <unordered_map>
#include <unordered_set> // used by SpillCodeInserter::run() for its to_del set
30 #else
31 #include <tr1/unordered_map>
#include <tr1/unordered_set>
32 #endif
33
34 namespace nv50_ir {
35
36 #if __cplusplus >= 201103L
37 using std::hash;
38 using std::unordered_map;
using std::unordered_set;
39 #elif !defined(ANDROID)
40 using std::tr1::hash;
41 using std::tr1::unordered_map;
using std::tr1::unordered_set;
42 #else
43 #error Android release before Lollipop is not supported!
44 #endif
45
46 #define MAX_REGISTER_FILE_SIZE 256
47
48 class RegisterSet
49 {
50 public:
51 RegisterSet(const Target *);
52
53 void init(const Target *);
54 void reset(DataFile, bool resetMax = false);
55
56 void periodicMask(DataFile f, uint32_t lock, uint32_t unlock);
57 void intersect(DataFile f, const RegisterSet *);
58
59 bool assign(int32_t& reg, DataFile f, unsigned int size);
60 void release(DataFile f, int32_t reg, unsigned int size);
61 void occupy(DataFile f, int32_t reg, unsigned int size);
62 void occupy(const Value *);
63 void occupyMask(DataFile f, int32_t reg, uint8_t mask);
64 bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
65 bool testOccupy(const Value *);
66 bool testOccupy(DataFile f, int32_t reg, unsigned int size);
67
68 inline int getMaxAssigned(DataFile f) const { return fill[f]; }
69
70 inline unsigned int getFileSize(DataFile f, uint8_t regSize) const
71 {
72 if (restrictedGPR16Range && f == FILE_GPR && regSize == 2)
73 return (last[f] + 1) / 2;
74 return last[f] + 1;
75 }
76
77 inline unsigned int units(DataFile f, unsigned int size) const
78 {
79 return size >> unit[f];
80 }
81 // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
82 inline unsigned int idToBytes(const Value *v) const
83 {
84 return v->reg.data.id * MIN2(v->reg.size, 4);
85 }
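// A worked example with hypothetical values: a 32-bit value with id 3 starts
// at byte 12, and so does a 64-bit value with id 3 ($r3d covering $r3+$r4),
// because ids of values wider than 4 bytes are counted in 4-byte words.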
86 inline unsigned int idToUnits(const Value *v) const
87 {
88 return units(v->reg.file, idToBytes(v));
89 }
90 inline int bytesToId(Value *v, unsigned int bytes) const
91 {
92 if (v->reg.size < 4)
93 return units(v->reg.file, bytes);
94 return bytes / 4;
95 }
96 inline int unitsToId(DataFile f, int u, uint8_t size) const
97 {
98 if (u < 0)
99 return -1;
100 return (size < 4) ? u : ((u << unit[f]) / 4);
101 }
102
103 void print() const;
104
105 private:
106 BitSet bits[LAST_REGISTER_FILE + 1];
107
108 int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity
109
110 int last[LAST_REGISTER_FILE + 1];
111 int fill[LAST_REGISTER_FILE + 1];
112
113 const bool restrictedGPR16Range;
114 };
115
116 void
117 RegisterSet::reset(DataFile f, bool resetMax)
118 {
119 bits[f].fill(0);
120 if (resetMax)
121 fill[f] = -1;
122 }
123
124 void
125 RegisterSet::init(const Target *targ)
126 {
127 for (unsigned int rf = 0; rf <= FILE_ADDRESS; ++rf) {
128 DataFile f = static_cast<DataFile>(rf);
129 last[rf] = targ->getFileSize(f) - 1;
130 unit[rf] = targ->getFileUnit(f);
131 fill[rf] = -1;
132 assert(last[rf] < MAX_REGISTER_FILE_SIZE);
133 bits[rf].allocate(last[rf] + 1, true);
134 }
135 }
136
137 RegisterSet::RegisterSet(const Target *targ)
138 : restrictedGPR16Range(targ->getChipset() < 0xc0)
139 {
140 init(targ);
141 for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
142 reset(static_cast<DataFile>(i));
143 }
144
145 void
146 RegisterSet::periodicMask(DataFile f, uint32_t lock, uint32_t unlock)
147 {
148 bits[f].periodicMask32(lock, unlock);
149 }
150
151 void
152 RegisterSet::intersect(DataFile f, const RegisterSet *set)
153 {
154 bits[f] |= set->bits[f];
155 }
156
157 void
158 RegisterSet::print() const
159 {
160 INFO("GPR:");
161 bits[FILE_GPR].print();
162 INFO("\n");
163 }
164
165 bool
166 RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size)
167 {
168 reg = bits[f].findFreeRange(size);
169 if (reg < 0)
170 return false;
171 fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
172 return true;
173 }
174
175 bool
176 RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
177 {
178 return bits[f].testRange(reg, size);
179 }
180
181 void
182 RegisterSet::occupy(const Value *v)
183 {
184 occupy(v->reg.file, idToUnits(v), v->reg.size >> unit[v->reg.file]);
185 }
186
187 void
188 RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
189 {
190 bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
191 }
192
193 void
194 RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
195 {
196 bits[f].setRange(reg, size);
197
198 INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);
199
200 fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
201 }
202
203 bool
204 RegisterSet::testOccupy(const Value *v)
205 {
206 return testOccupy(v->reg.file,
207 idToUnits(v), v->reg.size >> unit[v->reg.file]);
208 }
209
210 bool
211 RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
212 {
213 if (isOccupied(f, reg, size))
214 return false;
215 occupy(f, reg, size);
216 return true;
217 }
218
219 void
220 RegisterSet::release(DataFile f, int32_t reg, unsigned int size)
221 {
222 bits[f].clrRange(reg, size);
223
224 INFO_DBG(0, REG_ALLOC, "reg release: %u[%i] %u\n", f, reg, size);
225 }
226
227 class RegAlloc
228 {
229 public:
230 RegAlloc(Program *program) : prog(program), sequence(0) { }
231
232 bool exec();
233 bool execFunc();
234
235 private:
236 class PhiMovesPass : public Pass {
237 private:
238 virtual bool visit(BasicBlock *);
239 inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
240 inline void splitEdges(BasicBlock *b);
241 };
242
243 class ArgumentMovesPass : public Pass {
244 private:
245 virtual bool visit(BasicBlock *);
246 };
247
248 class BuildIntervalsPass : public Pass {
249 private:
250 virtual bool visit(BasicBlock *);
251 void collectLiveValues(BasicBlock *);
252 void addLiveRange(Value *, const BasicBlock *, int end);
253 };
254
255 class InsertConstraintsPass : public Pass {
256 public:
257 bool exec(Function *func);
258 private:
259 virtual bool visit(BasicBlock *);
260
261 bool insertConstraintMoves();
262
263 void condenseDefs(Instruction *);
264 void condenseSrcs(Instruction *, const int first, const int last);
265
266 void addHazard(Instruction *i, const ValueRef *src);
267 void textureMask(TexInstruction *);
268 void addConstraint(Instruction *, int s, int n);
269 bool detectConflict(Instruction *, int s);
270
271 // target specific functions, TODO: put in subclass or Target
272 void texConstraintNV50(TexInstruction *);
273 void texConstraintNVC0(TexInstruction *);
274 void texConstraintNVE0(TexInstruction *);
275 void texConstraintGM107(TexInstruction *);
276
277 std::list<Instruction *> constrList;
278
279 const Target *targ;
280 };
281
282 bool buildLiveSets(BasicBlock *);
283
284 private:
285 Program *prog;
286 Function *func;
287
288 // instructions in control flow / chronological order
289 ArrayList insns;
290
291 int sequence; // for manual passes through CFG
292 };
293
294 typedef std::pair<Value *, Value *> ValuePair;
295
296 class SpillCodeInserter
297 {
298 public:
299 SpillCodeInserter(Function *fn) : func(fn), stackSize(0), stackBase(0) { }
300
301 bool run(const std::list<ValuePair>&);
302
303 Symbol *assignSlot(const Interval&, const unsigned int size);
304 Value *offsetSlot(Value *, const LValue *);
305 inline int32_t getStackSize() const { return stackSize; }
306
307 private:
308 Function *func;
309
310 struct SpillSlot
311 {
312 Interval occup;
313 std::list<Value *> residents; // needed to recalculate occup
314 Symbol *sym;
315 int32_t offset;
316 inline uint8_t size() const { return sym->reg.size; }
317 };
318 std::list<SpillSlot> slots;
319 int32_t stackSize;
320 int32_t stackBase;
321
322 LValue *unspill(Instruction *usei, LValue *, Value *slot);
323 void spill(Instruction *defi, Value *slot, LValue *);
324 };
325
326 void
327 RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
328 const BasicBlock *bb,
329 int end)
330 {
331 Instruction *insn = val->getUniqueInsn();
332
333 if (!insn)
334 insn = bb->getFirst();
335
336 assert(bb->getFirst()->serial <= bb->getExit()->serial);
337 assert(bb->getExit()->serial + 1 >= end);
338
339 int begin = insn->serial;
340 if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
341 begin = bb->getEntry()->serial;
342
343 INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
344 val->id, begin, insn->serial, end);
345
346 if (begin != end) // empty ranges are only added as hazards for fixed regs
347 val->livei.extend(begin, end);
348 }
349
350 bool
351 RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
352 {
353 if (b->cfg.incidentCount() <= 1)
354 return false;
355
356 int n = 0;
357 for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
358 if (ei.getType() == Graph::Edge::TREE ||
359 ei.getType() == Graph::Edge::FORWARD)
360 ++n;
361 return (n == 2);
362 }
363
364 struct PhiMapHash {
365 size_t operator()(const std::pair<Instruction *, BasicBlock *>& val) const {
366 return hash<Instruction*>()(val.first) * 31 +
367 hash<BasicBlock*>()(val.second);
368 }
369 };
370
371 typedef unordered_map<
372 std::pair<Instruction *, BasicBlock *>, Value *, PhiMapHash> PhiMap;
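// A PhiMap keys each phi source by its (phi instruction, predecessor block)
// pair, so sources can be recovered even after the incident edge order has
// changed. Minimal usage sketch (names illustrative):
//   PhiMap phis;
//   phis.insert(std::make_pair(std::make_pair(phi, pred), phi->getSrc(j)));
//   PhiMap::const_iterator it = phis.find(std::make_pair(phi, pred));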
373
374 // Critical edges need to be split up so that work can be inserted along
375 // specific edge transitions. Unfortunately manipulating incident edges into a
376 // BB invalidates all the PHI nodes since their sources are implicitly ordered
377 // by incident edge order.
378 //
379 // TODO: Make it so that this is not the case, and PHI nodes store pointers to
380 // the original BBs.
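// Illustration (hypothetical CFG): the edge p -> b is critical when p has
// multiple successors and b has multiple predecessors; splitting inserts a
// fresh block n on that edge only:
//
//   p --> b            p --> n --> b
//    \          ==>     \
//     +--> x             +--> x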
381 void
382 RegAlloc::PhiMovesPass::splitEdges(BasicBlock *bb)
383 {
384 BasicBlock *pb, *pn;
385 Instruction *phi;
386 Graph::EdgeIterator ei;
387 std::stack<BasicBlock *> stack;
388 int j = 0;
389
390 for (ei = bb->cfg.incident(); !ei.end(); ei.next()) {
391 pb = BasicBlock::get(ei.getNode());
392 assert(pb);
393 if (needNewElseBlock(bb, pb))
394 stack.push(pb);
395 }
396
397 // No critical edges were found, no need to perform any work.
398 if (stack.empty())
399 return;
400
401 // We're about to, potentially, reorder the inbound edges. This means that
402 // we need to hold on to the (phi, bb) -> src mapping, and fix up the phi
403 // nodes after the graph has been modified.
404 PhiMap phis;
405
406 j = 0;
407 for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
408 pb = BasicBlock::get(ei.getNode());
409 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next)
410 phis.insert(std::make_pair(std::make_pair(phi, pb), phi->getSrc(j)));
411 }
412
413 while (!stack.empty()) {
414 pb = stack.top();
415 pn = new BasicBlock(func);
416 stack.pop();
417
418 pb->cfg.detach(&bb->cfg);
419 pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
420 pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);
421
422 assert(pb->getExit()->op != OP_CALL);
423 if (pb->getExit()->asFlow()->target.bb == bb)
424 pb->getExit()->asFlow()->target.bb = pn;
425
426 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
427 PhiMap::iterator it = phis.find(std::make_pair(phi, pb));
428 assert(it != phis.end());
429 phis.insert(std::make_pair(std::make_pair(phi, pn), it->second));
430 phis.erase(it);
431 }
432 }
433
434 // Now go through and fix up all of the phi node sources.
435 j = 0;
436 for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
437 pb = BasicBlock::get(ei.getNode());
438 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
439 PhiMap::const_iterator it = phis.find(std::make_pair(phi, pb));
440 assert(it != phis.end());
441
442 phi->setSrc(j, it->second);
443 }
444 }
445 }
446
447 // For each operand of each PHI in b, generate a new value by inserting a MOV
448 // at the end of the block it is coming from and replace the operand with its
449 // result. This eliminates liveness conflicts and enables us to let values be
450 // copied to the right register if such a conflict exists nonetheless.
451 //
452 // These MOVs are also crucial in making sure the live intervals of phi sources
453 // are extended until the end of the loop, since they are not included in the
454 // live-in sets.
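// Sketch of the rewrite (illustrative IR, not exact compiler output):
//   BB:1: ...              BB:2: ...
//   BB:3: %c = phi %a (via BB:1), %b (via BB:2)
// becomes
//   BB:1: mov %t1, %a      BB:2: mov %t2, %b
//   BB:3: %c = phi %t1, %t2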
455 bool
456 RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
457 {
458 Instruction *phi, *mov;
459
460 splitEdges(bb);
461
462 // insert MOVs (phi->src(j) should stem from j-th in-BB)
463 int j = 0;
464 for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
465 BasicBlock *pb = BasicBlock::get(ei.getNode());
466 if (!pb->isTerminated())
467 pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));
468
469 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
470 LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
471 mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
472
473 mov->setSrc(0, phi->getSrc(j));
474 mov->setDef(0, tmp);
475 phi->setSrc(j, tmp);
476
477 pb->insertBefore(pb->getExit(), mov);
478 }
479 ++j;
480 }
481
482 return true;
483 }
484
485 bool
486 RegAlloc::ArgumentMovesPass::visit(BasicBlock *bb)
487 {
488 // Bind function call inputs/outputs to the same physical register
489 // the callee uses, inserting moves as appropriate in case a
490 // conflict arises.
491 for (Instruction *i = bb->getEntry(); i; i = i->next) {
492 FlowInstruction *cal = i->asFlow();
493 // TODO: Handle indirect calls.
494 // Right now they should only be generated for builtins.
495 if (!cal || cal->op != OP_CALL || cal->builtin || cal->indirect)
496 continue;
497 RegisterSet clobberSet(prog->getTarget());
498
499 // Bind input values.
500 for (int s = cal->indirect ? 1 : 0; cal->srcExists(s); ++s) {
501 const int t = cal->indirect ? (s - 1) : s;
502 LValue *tmp = new_LValue(func, cal->getSrc(s)->asLValue());
503 tmp->reg.data.id = cal->target.fn->ins[t].rep()->reg.data.id;
504
505 Instruction *mov =
506 new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
507 mov->setDef(0, tmp);
508 mov->setSrc(0, cal->getSrc(s));
509 cal->setSrc(s, tmp);
510
511 bb->insertBefore(cal, mov);
512 }
513
514 // Bind output values.
515 for (int d = 0; cal->defExists(d); ++d) {
516 LValue *tmp = new_LValue(func, cal->getDef(d)->asLValue());
517 tmp->reg.data.id = cal->target.fn->outs[d].rep()->reg.data.id;
518
519 Instruction *mov =
520 new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
521 mov->setSrc(0, tmp);
522 mov->setDef(0, cal->getDef(d));
523 cal->setDef(d, tmp);
524
525 bb->insertAfter(cal, mov);
526 clobberSet.occupy(tmp);
527 }
528
529 // Bind clobbered values.
530 for (std::deque<Value *>::iterator it = cal->target.fn->clobbers.begin();
531 it != cal->target.fn->clobbers.end();
532 ++it) {
533 if (clobberSet.testOccupy(*it)) {
534 Value *tmp = new_LValue(func, (*it)->asLValue());
535 tmp->reg.data.id = (*it)->reg.data.id;
536 cal->setDef(cal->defCount(), tmp);
537 }
538 }
539 }
540
541 // Update the clobber set of the function.
542 if (BasicBlock::get(func->cfgExit) == bb) {
543 func->buildDefSets();
544 for (unsigned int i = 0; i < bb->defSet.getSize(); ++i)
545 if (bb->defSet.test(i))
546 func->clobbers.push_back(func->getLValue(i));
547 }
548
549 return true;
550 }
551
552 // Build the set of live-in variables of bb.
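// This is the usual backward dataflow step; per instruction it can be
// sketched as
//   live = (live \ defs(i)) ∪ srcs(i)
// walking from the exit upwards, after seeding live with the union of all
// successors' live-in sets (plus the function's outputs at the exit block).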
553 bool
554 RegAlloc::buildLiveSets(BasicBlock *bb)
555 {
556 Function *f = bb->getFunction();
557 BasicBlock *bn;
558 Instruction *i;
559 unsigned int s, d;
560
561 INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());
562
563 bb->liveSet.allocate(func->allLValues.getSize(), false);
564
565 int n = 0;
566 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
567 bn = BasicBlock::get(ei.getNode());
568 if (bn == bb)
569 continue;
570 if (bn->cfg.visit(sequence))
571 if (!buildLiveSets(bn))
572 return false;
573 if (n++ || bb->liveSet.marker)
574 bb->liveSet |= bn->liveSet;
575 else
576 bb->liveSet = bn->liveSet;
577 }
578 if (!n && !bb->liveSet.marker)
579 bb->liveSet.fill(0);
580 bb->liveSet.marker = true;
581
582 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
583 INFO("BB:%i live set of out blocks:\n", bb->getId());
584 bb->liveSet.print();
585 }
586
587 // if (!bb->getEntry())
588 // return true;
589
590 if (bb == BasicBlock::get(f->cfgExit)) {
591 for (std::deque<ValueRef>::iterator it = f->outs.begin();
592 it != f->outs.end(); ++it) {
593 assert(it->get()->asLValue());
594 bb->liveSet.set(it->get()->id);
595 }
596 }
597
598 for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
599 for (d = 0; i->defExists(d); ++d)
600 bb->liveSet.clr(i->getDef(d)->id);
601 for (s = 0; i->srcExists(s); ++s)
602 if (i->getSrc(s)->asLValue())
603 bb->liveSet.set(i->getSrc(s)->id);
604 }
605 for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
606 bb->liveSet.clr(i->getDef(0)->id);
607
608 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
609 INFO("BB:%i live set after propagation:\n", bb->getId());
610 bb->liveSet.print();
611 }
612
613 return true;
614 }
615
616 void
617 RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
618 {
619 BasicBlock *bbA = NULL, *bbB = NULL;
620
621 if (bb->cfg.outgoingCount()) {
622 // trickery to save a loop of OR'ing liveSets
623 // aliasing works fine with BitSet::setOr
624 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
625 if (ei.getType() == Graph::Edge::DUMMY)
626 continue;
627 if (bbA) {
628 bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
629 bbA = bb;
630 } else {
631 bbA = bbB;
632 }
633 bbB = BasicBlock::get(ei.getNode());
634 }
635 bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
636 } else
637 if (bb->cfg.incidentCount()) {
638 bb->liveSet.fill(0);
639 }
640 }
641
642 bool
643 RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
644 {
645 collectLiveValues(bb);
646
647 INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());
648
649 // go through the out blocks and remove, from the live set, phi sources
650 // that do not originate in the current block
651 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
652 BasicBlock *out = BasicBlock::get(ei.getNode());
653
654 for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
655 bb->liveSet.clr(i->getDef(0)->id);
656
657 for (int s = 0; i->srcExists(s); ++s) {
658 assert(i->src(s).getInsn());
659 if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
660 bb->liveSet.set(i->getSrc(s)->id);
661 else
662 bb->liveSet.clr(i->getSrc(s)->id);
663 }
664 }
665 }
666
667 // remaining live-outs are live until end
668 if (bb->getExit()) {
669 for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
670 if (bb->liveSet.test(j))
671 addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
672 }
673
674 for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
675 for (int d = 0; i->defExists(d); ++d) {
676 bb->liveSet.clr(i->getDef(d)->id);
677 if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
678 i->getDef(d)->livei.extend(i->serial, i->serial);
679 }
680
681 for (int s = 0; i->srcExists(s); ++s) {
682 if (!i->getSrc(s)->asLValue())
683 continue;
684 if (!bb->liveSet.test(i->getSrc(s)->id)) {
685 bb->liveSet.set(i->getSrc(s)->id);
686 addLiveRange(i->getSrc(s), bb, i->serial);
687 }
688 }
689 }
690
691 if (bb == BasicBlock::get(func->cfg.getRoot())) {
692 for (std::deque<ValueDef>::iterator it = func->ins.begin();
693 it != func->ins.end(); ++it) {
694 if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
695 it->get()->livei.extend(0, 1);
696 }
697 }
698
699 return true;
700 }
701
702
703 #define JOIN_MASK_PHI (1 << 0)
704 #define JOIN_MASK_UNION (1 << 1)
705 #define JOIN_MASK_MOV (1 << 2)
706 #define JOIN_MASK_TEX (1 << 3)
707
708 class GCRA
709 {
710 public:
711 GCRA(Function *, SpillCodeInserter&);
712 ~GCRA();
713
714 bool allocateRegisters(ArrayList& insns);
715
716 void printNodeInfo() const;
717
718 private:
719 class RIG_Node : public Graph::Node
720 {
721 public:
722 RIG_Node();
723
724 void init(const RegisterSet&, LValue *);
725
726 void addInterference(RIG_Node *);
727 void addRegPreference(RIG_Node *);
728
729 inline LValue *getValue() const
730 {
731 return reinterpret_cast<LValue *>(data);
732 }
733 inline void setValue(LValue *lval) { data = lval; }
734
735 inline uint8_t getCompMask() const
736 {
737 return ((1 << colors) - 1) << (reg & 7);
738 }
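// e.g. colors = 2, reg = 5: ((1 << 2) - 1) << (5 & 7) = 0x60, i.e. the two
// component bits this value occupies within its aligned 8-unit group.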
739
740 static inline RIG_Node *get(const Graph::EdgeIterator& ei)
741 {
742 return static_cast<RIG_Node *>(ei.getNode());
743 }
744
745 public:
746 uint32_t degree;
747 uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
748 uint16_t colors;
749
750 DataFile f;
751 int32_t reg;
752
753 float weight;
754
755 // list pointers for simplify() phase
756 RIG_Node *next;
757 RIG_Node *prev;
758
759 // union of the live intervals of all coalesced values (we want to retain
760 // the separate intervals for testing interference of compound values)
761 Interval livei;
762
763 std::list<RIG_Node *> prefRegs;
764 };
765
766 private:
767 inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }
768
769 void buildRIG(ArrayList&);
770 bool coalesce(ArrayList&);
771 bool doCoalesce(ArrayList&, unsigned int mask);
772 void calculateSpillWeights();
773 void simplify();
774 bool selectRegisters();
775 void cleanup(const bool success);
776
777 void simplifyEdge(RIG_Node *, RIG_Node *);
778 void simplifyNode(RIG_Node *);
779
780 bool coalesceValues(Value *, Value *, bool force);
781 void resolveSplitsAndMerges();
782 void makeCompound(Instruction *, bool isSplit);
783
784 inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);
785
786 inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
787 void checkList(std::list<RIG_Node *>&);
788
789 private:
790 std::stack<uint32_t> stack;
791
792 // list headers for simplify() phase
793 RIG_Node lo[2];
794 RIG_Node hi;
795
796 Graph RIG;
797 RIG_Node *nodes;
798 unsigned int nodeCount;
799
800 Function *func;
801 Program *prog;
802
803 static uint8_t relDegree[17][17];
804
805 RegisterSet regs;
806
807 // need to fixup register id for participants of OP_MERGE/SPLIT
808 std::list<Instruction *> merges;
809 std::list<Instruction *> splits;
810
811 SpillCodeInserter& spill;
812 std::list<ValuePair> mustSpill;
813 };
814
815 uint8_t GCRA::relDegree[17][17];
816
817 GCRA::RIG_Node::RIG_Node() : Node(NULL), next(this), prev(this)
818 {
819 colors = 0;
820 }
821
822 void
823 GCRA::printNodeInfo() const
824 {
825 for (unsigned int i = 0; i < nodeCount; ++i) {
826 if (!nodes[i].colors)
827 continue;
828 INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
829 i,
830 nodes[i].f,nodes[i].reg,nodes[i].colors,
831 nodes[i].weight,
832 nodes[i].degree, nodes[i].degreeLimit);
833
834 for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
835 INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
836 for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
837 INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
838 INFO("\n");
839 }
840 }
841
842 void
843 GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
844 {
845 setValue(lval);
846 if (lval->reg.data.id >= 0)
847 lval->noSpill = lval->fixedReg = 1;
848
849 colors = regs.units(lval->reg.file, lval->reg.size);
850 f = lval->reg.file;
851 reg = -1;
852 if (lval->reg.data.id >= 0)
853 reg = regs.idToUnits(lval);
854
855 weight = std::numeric_limits<float>::infinity();
856 degree = 0;
857 degreeLimit = regs.getFileSize(f, lval->reg.size);
858 degreeLimit -= relDegree[1][colors] - 1;
859
860 livei.insert(lval->livei);
861 }
862
863 bool
864 GCRA::coalesceValues(Value *dst, Value *src, bool force)
865 {
866 LValue *rep = dst->join->asLValue();
867 LValue *val = src->join->asLValue();
868
869 if (!force && val->reg.data.id >= 0) {
870 rep = src->join->asLValue();
871 val = dst->join->asLValue();
872 }
873 RIG_Node *nRep = &nodes[rep->id];
874 RIG_Node *nVal = &nodes[val->id];
875
876 if (src->reg.file != dst->reg.file) {
877 if (!force)
878 return false;
879 WARN("forced coalescing of values in different files !\n");
880 }
881 if (!force && dst->reg.size != src->reg.size)
882 return false;
883
884 if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
885 if (force) {
886 if (val->reg.data.id >= 0)
887 WARN("forced coalescing of values in different fixed regs !\n");
888 } else {
889 if (val->reg.data.id >= 0)
890 return false;
891 // make sure that there is no overlap with the fixed register of rep
892 for (ArrayList::Iterator it = func->allLValues.iterator();
893 !it.end(); it.next()) {
894 Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
895 assert(reg);
896 if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
897 return false;
898 }
899 }
900 }
901
902 if (!force && nRep->livei.overlaps(nVal->livei))
903 return false;
904
905 INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
906 rep->id, rep->reg.data.id, val->id);
907
908 // set join pointer of all values joined with val
909 for (Value::DefIterator def = val->defs.begin(); def != val->defs.end();
910 ++def)
911 (*def)->get()->join = rep;
912 assert(rep->join == rep && val->join == rep);
913
914 // add val's definitions to rep and extend the live interval of its RIG node
915 rep->defs.insert(rep->defs.end(), val->defs.begin(), val->defs.end());
916 nRep->livei.unify(nVal->livei);
917 return true;
918 }
919
920 bool
921 GCRA::coalesce(ArrayList& insns)
922 {
923 bool ret = doCoalesce(insns, JOIN_MASK_PHI);
924 if (!ret)
925 return false;
926 switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
927 case 0x50:
928 case 0x80:
929 case 0x90:
930 case 0xa0:
931 ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
932 break;
933 case 0xc0:
934 case 0xd0:
935 case 0xe0:
936 case 0xf0:
937 case 0x100:
938 case 0x110:
939 ret = doCoalesce(insns, JOIN_MASK_UNION);
940 break;
941 default:
942 break;
943 }
944 if (!ret)
945 return false;
946 return doCoalesce(insns, JOIN_MASK_MOV);
947 }
948
949 static inline uint8_t makeCompMask(int compSize, int base, int size)
950 {
951 uint8_t m = ((1 << size) - 1) << base;
952
953 switch (compSize) {
954 case 1:
955 return 0xff;
956 case 2:
957 m |= (m << 2);
958 return (m << 4) | m;
959 case 3:
960 case 4:
961 return (m << 4) | m;
962 default:
963 assert(compSize <= 8);
964 return m;
965 }
966 }
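// Worked example: makeCompMask(2, 0, 1) gives m = 0b0001, case 2 widens it
// to 0b0101, and (m << 4) | m = 0x55 is returned -- the base/size pattern
// replicated over all 8 mask bits for a 2-unit compound.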
967
968 // Used when coalescing moves. The non-compound value will become one, e.g.:
969 // mov b32 $r0 $r2 / merge b64 $r0d { $r0 $r1 }
970 // split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
971 static inline void copyCompound(Value *dst, Value *src)
972 {
973 LValue *ldst = dst->asLValue();
974 LValue *lsrc = src->asLValue();
975
976 if (ldst->compound && !lsrc->compound) {
977 LValue *swap = lsrc;
978 lsrc = ldst;
979 ldst = swap;
980 }
981
982 ldst->compound = lsrc->compound;
983 ldst->compMask = lsrc->compMask;
984 }
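// In the first example above, $r0 is compound (it feeds the merge) while $r2
// is not; coalescing the mov therefore copies $r0's compound flag and
// component mask onto $r2, so both ends carry the same placement restriction.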
985
986 void
987 GCRA::makeCompound(Instruction *insn, bool split)
988 {
989 LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();
990
991 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
992 INFO("makeCompound(split = %i): ", split);
993 insn->print();
994 }
995
996 const unsigned int size = getNode(rep)->colors;
997 unsigned int base = 0;
998
999 if (!rep->compound)
1000 rep->compMask = 0xff;
1001 rep->compound = 1;
1002
1003 for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
1004 LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();
1005
1006 val->compound = 1;
1007 if (!val->compMask)
1008 val->compMask = 0xff;
1009 val->compMask &= makeCompMask(size, base, getNode(val)->colors);
1010 assert(val->compMask);
1011
1012 INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
1013 rep->id, rep->compMask, val->id, val->compMask);
1014
1015 base += getNode(val)->colors;
1016 }
1017 assert(base == size);
1018 }
1019
1020 bool
1021 GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
1022 {
1023 int c, n;
1024
1025 for (n = 0; n < insns.getSize(); ++n) {
1026 Instruction *i;
1027 Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));
1028
1029 switch (insn->op) {
1030 case OP_PHI:
1031 if (!(mask & JOIN_MASK_PHI))
1032 break;
1033 for (c = 0; insn->srcExists(c); ++c)
1034 if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
1035 // this is bad
1036 ERROR("failed to coalesce phi operands\n");
1037 return false;
1038 }
1039 break;
1040 case OP_UNION:
1041 case OP_MERGE:
1042 if (!(mask & JOIN_MASK_UNION))
1043 break;
1044 for (c = 0; insn->srcExists(c); ++c)
1045 coalesceValues(insn->getDef(0), insn->getSrc(c), true);
1046 if (insn->op == OP_MERGE) {
1047 merges.push_back(insn);
1048 if (insn->srcExists(1))
1049 makeCompound(insn, false);
1050 }
1051 break;
1052 case OP_SPLIT:
1053 if (!(mask & JOIN_MASK_UNION))
1054 break;
1055 splits.push_back(insn);
1056 for (c = 0; insn->defExists(c); ++c)
1057 coalesceValues(insn->getSrc(0), insn->getDef(c), true);
1058 makeCompound(insn, true);
1059 break;
1060 case OP_MOV:
1061 if (!(mask & JOIN_MASK_MOV))
1062 break;
1063 i = NULL;
1064 if (!insn->getDef(0)->uses.empty())
1065 i = (*insn->getDef(0)->uses.begin())->getInsn();
1066 // if this is a constraint-move there will only be a single use
1067 if (i && i->op == OP_MERGE) // do we really still need this ?
1068 break;
1069 i = insn->getSrc(0)->getUniqueInsn();
1070 if (i && !i->constrainedDefs()) {
1071 if (coalesceValues(insn->getDef(0), insn->getSrc(0), false))
1072 copyCompound(insn->getSrc(0), insn->getDef(0));
1073 }
1074 break;
1075 case OP_TEX:
1076 case OP_TXB:
1077 case OP_TXL:
1078 case OP_TXF:
1079 case OP_TXQ:
1080 case OP_TXD:
1081 case OP_TXG:
1082 case OP_TXLQ:
1083 case OP_TEXCSAA:
1084 case OP_TEXPREP:
1085 if (!(mask & JOIN_MASK_TEX))
1086 break;
1087 for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
1088 coalesceValues(insn->getDef(c), insn->getSrc(c), true);
1089 break;
1090 default:
1091 break;
1092 }
1093 }
1094 return true;
1095 }
1096
1097 void
1098 GCRA::RIG_Node::addInterference(RIG_Node *node)
1099 {
1100 this->degree += relDegree[node->colors][colors];
1101 node->degree += relDegree[colors][node->colors];
1102
1103 this->attach(node, Graph::Edge::CROSS);
1104 }
1105
1106 void
1107 GCRA::RIG_Node::addRegPreference(RIG_Node *node)
1108 {
1109 prefRegs.push_back(node);
1110 }
1111
1112 GCRA::GCRA(Function *fn, SpillCodeInserter& spill) :
1113 func(fn),
1114 regs(fn->getProgram()->getTarget()),
1115 spill(spill)
1116 {
1117 prog = func->getProgram();
1118
1119 // initialize relative degrees array - i takes away from j
1120 for (int i = 1; i <= 16; ++i)
1121 for (int j = 1; j <= 16; ++j)
1122 relDegree[i][j] = j * ((i + j - 1) / j);
1123 }
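// relDegree[i][j] = j * ceil(i / j): a neighbour occupying i allocation
// units costs a j-unit node that many units of colourability. E.g. a 32-bit
// neighbour (i = 1) still blocks a whole aligned pair for a 64-bit node
// (j = 2): relDegree[1][2] = 2 * ((1 + 2 - 1) / 2) = 2.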
1124
1125 GCRA::~GCRA()
1126 {
1127 if (nodes)
1128 delete[] nodes;
1129 }
1130
1131 void
1132 GCRA::checkList(std::list<RIG_Node *>& lst)
1133 {
1134 GCRA::RIG_Node *prev = NULL;
1135
1136 for (std::list<RIG_Node *>::iterator it = lst.begin();
1137 it != lst.end();
1138 ++it) {
1139 assert((*it)->getValue()->join == (*it)->getValue());
1140 if (prev)
1141 assert(prev->livei.begin() <= (*it)->livei.begin());
1142 prev = *it;
1143 }
1144 }
1145
1146 void
1147 GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
1148 {
1149 if (node->livei.isEmpty())
1150 return;
1151 // only the intervals of joined values may arrive out of order
1152 std::list<RIG_Node *>::iterator prev, it;
1153 for (it = list.end(); it != list.begin(); it = prev) {
1154 prev = it;
1155 --prev;
1156 if ((*prev)->livei.begin() <= node->livei.begin())
1157 break;
1158 }
1159 list.insert(it, node);
1160 }
1161
1162 void
1163 GCRA::buildRIG(ArrayList& insns)
1164 {
1165 std::list<RIG_Node *> values, active;
1166
1167 for (std::deque<ValueDef>::iterator it = func->ins.begin();
1168 it != func->ins.end(); ++it)
1169 insertOrderedTail(values, getNode(it->get()->asLValue()));
1170
1171 for (int i = 0; i < insns.getSize(); ++i) {
1172 Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
1173 for (int d = 0; insn->defExists(d); ++d)
1174 if (insn->getDef(d)->rep() == insn->getDef(d))
1175 insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
1176 }
1177 checkList(values);
1178
1179 while (!values.empty()) {
1180 RIG_Node *cur = values.front();
1181
1182 for (std::list<RIG_Node *>::iterator it = active.begin();
1183 it != active.end();) {
1184 RIG_Node *node = *it;
1185
1186 if (node->livei.end() <= cur->livei.begin()) {
1187 it = active.erase(it);
1188 } else {
1189 if (node->f == cur->f && node->livei.overlaps(cur->livei))
1190 cur->addInterference(node);
1191 ++it;
1192 }
1193 }
1194 values.pop_front();
1195 active.push_back(cur);
1196 }
1197 }
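// buildRIG() is a linear interval sweep: values are visited in order of
// live-range start, nodes whose ranges have ended are retired from 'active',
// and an interference edge is added for every remaining overlap within the
// same register file.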
1198
1199 void
1200 GCRA::calculateSpillWeights()
1201 {
1202 for (unsigned int i = 0; i < nodeCount; ++i) {
1203 RIG_Node *const n = &nodes[i];
1204 if (!nodes[i].colors || nodes[i].livei.isEmpty())
1205 continue;
1206 if (nodes[i].reg >= 0) {
1207 // update max reg
1208 regs.occupy(n->f, n->reg, n->colors);
1209 continue;
1210 }
1211 LValue *val = nodes[i].getValue();
1212
1213 if (!val->noSpill) {
1214 int rc = 0;
1215 for (Value::DefIterator it = val->defs.begin();
1216 it != val->defs.end();
1217 ++it)
1218 rc += (*it)->get()->refCount();
1219
1220 nodes[i].weight =
1221 (float)rc * (float)rc / (float)nodes[i].livei.extent();
1222 }
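// i.e. weight = refCount^2 / extent: heavily used, short-lived values are
// expensive to spill. With made-up numbers, 3 references over an extent of
// 90 instructions give weight 0.1; simplify() picks low scores as spill
// candidates first.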
1223
1224 if (nodes[i].degree < nodes[i].degreeLimit) {
1225 int l = 0;
1226 if (val->reg.size > 4)
1227 l = 1;
1228 DLLIST_ADDHEAD(&lo[l], &nodes[i]);
1229 } else {
1230 DLLIST_ADDHEAD(&hi, &nodes[i]);
1231 }
1232 }
1233 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1234 printNodeInfo();
1235 }
1236
1237 void
1238 GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
1239 {
1240 bool move = b->degree >= b->degreeLimit;
1241
1242 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1243 "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
1244 a->getValue()->id, a->degree, a->degreeLimit,
1245 b->getValue()->id, b->degree, b->degreeLimit);
1246
1247 b->degree -= relDegree[a->colors][b->colors];
1248
1249 move = move && b->degree < b->degreeLimit;
1250 if (move && !DLLIST_EMPTY(b)) {
1251 int l = (b->getValue()->reg.size > 4) ? 1 : 0;
1252 DLLIST_DEL(b);
1253 DLLIST_ADDTAIL(&lo[l], b);
1254 }
1255 }
1256
1257 void
1258 GCRA::simplifyNode(RIG_Node *node)
1259 {
1260 for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
1261 simplifyEdge(node, RIG_Node::get(ei));
1262
1263 for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
1264 simplifyEdge(node, RIG_Node::get(ei));
1265
1266 DLLIST_DEL(node);
1267 stack.push(node->getValue()->id);
1268
1269 INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
1270 node->getValue()->id,
1271 (node->degree < node->degreeLimit) ? "" : "(spill)");
1272 }
1273
1274 void
1275 GCRA::simplify()
1276 {
1277 for (;;) {
1278 if (!DLLIST_EMPTY(&lo[0])) {
1279 do {
1280 simplifyNode(lo[0].next);
1281 } while (!DLLIST_EMPTY(&lo[0]));
1282 } else
1283 if (!DLLIST_EMPTY(&lo[1])) {
1284 simplifyNode(lo[1].next);
1285 } else
1286 if (!DLLIST_EMPTY(&hi)) {
1287 RIG_Node *best = hi.next;
1288 float bestScore = best->weight / (float)best->degree;
1289 // spill candidate
1290 for (RIG_Node *it = best->next; it != &hi; it = it->next) {
1291 float score = it->weight / (float)it->degree;
1292 if (score < bestScore) {
1293 best = it;
1294 bestScore = score;
1295 }
1296 }
1297 if (isinf(bestScore)) {
1298 ERROR("no viable spill candidates left\n");
1299 break;
1300 }
1301 simplifyNode(best);
1302 } else {
1303 break;
1304 }
1305 }
1306 }
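// This is the optimistic Chaitin/Briggs-style scheme: nodes of significant
// degree are still pushed (logged as "(spill)" above) and only become actual
// spills if selectRegisters() fails to find a free range for them.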
1307
1308 void
1309 GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
1310 {
1311 const RIG_Node *intf = RIG_Node::get(ei);
1312
1313 if (intf->reg < 0)
1314 return;
1315 const LValue *vA = node->getValue();
1316 const LValue *vB = intf->getValue();
1317
1318 const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);
1319
1320 if (vA->compound | vB->compound) {
1321 // NOTE: this only works for >aligned< register tuples !
1322 for (Value::DefCIterator D = vA->defs.begin(); D != vA->defs.end(); ++D) {
1323 for (Value::DefCIterator d = vB->defs.begin(); d != vB->defs.end(); ++d) {
1324 const LValue *vD = (*D)->get()->asLValue();
1325 const LValue *vd = (*d)->get()->asLValue();
1326
1327 if (!vD->livei.overlaps(vd->livei)) {
1328 INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
1329 vD->id, vd->id);
1330 continue;
1331 }
1332
1333 uint8_t mask = vD->compound ? vD->compMask : ~0;
1334 if (vd->compound) {
1335 assert(vB->compound);
1336 mask &= vd->compMask & vB->compMask;
1337 } else {
1338 mask &= intfMask;
1339 }
1340
1341 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1342 "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
1343 vD->id,
1344 vD->compound ? vD->compMask : 0xff,
1345 vd->id,
1346 vd->compound ? vd->compMask : intfMask,
1347 vB->compMask, intf->reg & ~7, mask);
1348 if (mask)
1349 regs.occupyMask(node->f, intf->reg & ~7, mask);
1350 }
1351 }
1352 } else {
1353 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1354 "(%%%i) X (%%%i): $r%i + %u\n",
1355 vA->id, vB->id, intf->reg, intf->colors);
1356 regs.occupy(node->f, intf->reg, intf->colors);
1357 }
1358 }
1359
1360 bool
1361 GCRA::selectRegisters()
1362 {
1363 INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");
1364
1365 while (!stack.empty()) {
1366 RIG_Node *node = &nodes[stack.top()];
1367 stack.pop();
1368
1369 regs.reset(node->f);
1370
1371 INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
1372 node->getValue()->id, node->colors);
1373
1374 for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
1375 checkInterference(node, ei);
1376 for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
1377 checkInterference(node, ei);
1378
1379 if (!node->prefRegs.empty()) {
1380 for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
1381 it != node->prefRegs.end();
1382 ++it) {
1383 if ((*it)->reg >= 0 &&
1384 regs.testOccupy(node->f, (*it)->reg, node->colors)) {
1385 node->reg = (*it)->reg;
1386 break;
1387 }
1388 }
1389 }
1390 if (node->reg >= 0)
1391 continue;
1392 LValue *lval = node->getValue();
1393 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1394 regs.print();
1395 bool ret = regs.assign(node->reg, node->f, node->colors);
1396 if (ret) {
1397 INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
1398 lval->compMask = node->getCompMask();
1399 } else {
1400 INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
1401 lval->id, lval->reg.size);
1402 Symbol *slot = NULL;
1403 if (lval->reg.file == FILE_GPR)
1404 slot = spill.assignSlot(node->livei, lval->reg.size);
1405 mustSpill.push_back(ValuePair(lval, slot));
1406 }
1407 }
1408 if (!mustSpill.empty())
1409 return false;
1410 for (unsigned int i = 0; i < nodeCount; ++i) {
1411 LValue *lval = nodes[i].getValue();
1412 if (nodes[i].reg >= 0 && nodes[i].colors > 0)
1413 lval->reg.data.id =
1414 regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
1415 }
1416 return true;
1417 }
1418
1419 bool
1420 GCRA::allocateRegisters(ArrayList& insns)
1421 {
1422 bool ret;
1423
1424 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1425 "allocateRegisters to %u instructions\n", insns.getSize());
1426
1427 nodeCount = func->allLValues.getSize();
1428 nodes = new RIG_Node[nodeCount];
1429 if (!nodes)
1430 return false;
1431 for (unsigned int i = 0; i < nodeCount; ++i) {
1432 LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
1433 if (lval) {
1434 nodes[i].init(regs, lval);
1435 RIG.insert(&nodes[i]);
1436 }
1437 }
1438
1439 // coalesce first, we use only 1 RIG node for a group of joined values
1440 ret = coalesce(insns);
1441 if (!ret)
1442 goto out;
1443
1444 if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1445 func->printLiveIntervals();
1446
1447 buildRIG(insns);
1448 calculateSpillWeights();
1449 simplify();
1450
1451 ret = selectRegisters();
1452 if (!ret) {
1453 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1454 "selectRegisters failed, inserting spill code ...\n");
1455 regs.reset(FILE_GPR, true);
1456 spill.run(mustSpill);
1457 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1458 func->print();
1459 } else {
1460 prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
1461 }
1462
1463 out:
1464 cleanup(ret);
1465 return ret;
1466 }
1467
1468 void
1469 GCRA::cleanup(const bool success)
1470 {
1471 mustSpill.clear();
1472
1473 for (ArrayList::Iterator it = func->allLValues.iterator();
1474 !it.end(); it.next()) {
1475 LValue *lval = reinterpret_cast<LValue *>(it.get());
1476
1477 lval->livei.clear();
1478
1479 lval->compound = 0;
1480 lval->compMask = 0;
1481
1482 if (lval->join == lval)
1483 continue;
1484
1485 if (success) {
1486 lval->reg.data.id = lval->join->reg.data.id;
1487 } else {
1488 for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
1489 ++d)
1490 lval->join->defs.remove(*d);
1491 lval->join = lval;
1492 }
1493 }
1494
1495 if (success)
1496 resolveSplitsAndMerges();
1497 splits.clear(); // avoid duplicate entries on next coalesce pass
1498 merges.clear();
1499
1500 delete[] nodes;
1501 nodes = NULL;
1502 }
1503
1504 Symbol *
1505 SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
1506 {
1507 SpillSlot slot;
1508 int32_t offsetBase = stackSize;
1509 int32_t offset;
1510 std::list<SpillSlot>::iterator pos = slots.end(), it = slots.begin();
1511
1512 if (offsetBase % size)
1513 offsetBase += size - (offsetBase % size);
1514
1515 slot.sym = NULL;
1516
1517 for (offset = offsetBase; offset < stackSize; offset += size) {
1518 const int32_t entryEnd = offset + size;
1519 while (it != slots.end() && it->offset < offset)
1520 ++it;
1521 if (it == slots.end()) // no slots left
1522 break;
1523 std::list<SpillSlot>::iterator bgn = it;
1524
1525 while (it != slots.end() && it->offset < entryEnd) {
1526 it->occup.print();
1527 if (it->occup.overlaps(livei))
1528 break;
1529 ++it;
1530 }
1531 if (it == slots.end() || it->offset >= entryEnd) {
1532 // fits
1533 for (; bgn != slots.end() && bgn->offset < entryEnd; ++bgn) {
1534 bgn->occup.insert(livei);
1535 if (bgn->size() == size)
1536 slot.sym = bgn->sym;
1537 }
1538 break;
1539 }
1540 }
1541 if (!slot.sym) {
1542 stackSize = offset + size;
1543 slot.offset = offset;
1544 slot.sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
1545 if (!func->stackPtr)
1546 offset += func->tlsBase;
1547 slot.sym->setAddress(NULL, offset);
1548 slot.sym->reg.size = size;
1549 slots.insert(pos, slot)->occup.insert(livei);
1550 }
1551 return slot.sym;
1552 }
1553
1554 Value *
1555 SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
1556 {
1557 if (!lval->compound || (lval->compMask & 0x1))
1558 return base;
1559 Value *slot = cloneShallow(func, base);
1560
1561 slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
1562 slot->reg.size = lval->reg.size;
1563
1564 return slot;
1565 }
1566
1567 void
1568 SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
1569 {
1570 const DataType ty = typeOfSize(lval->reg.size);
1571
1572 slot = offsetSlot(slot, lval);
1573
1574 Instruction *st;
1575 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1576 st = new_Instruction(func, OP_STORE, ty);
1577 st->setSrc(0, slot);
1578 st->setSrc(1, lval);
1579 lval->noSpill = 1;
1580 } else {
1581 st = new_Instruction(func, OP_CVT, ty);
1582 st->setDef(0, slot);
1583 st->setSrc(0, lval);
1584 }
1585 defi->bb->insertAfter(defi, st);
1586 }
1587
1588 LValue *
1589 SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
1590 {
1591 const DataType ty = typeOfSize(lval->reg.size);
1592
1593 slot = offsetSlot(slot, lval);
1594 lval = cloneShallow(func, lval);
1595
1596 Instruction *ld;
1597 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1598 lval->noSpill = 1;
1599 ld = new_Instruction(func, OP_LOAD, ty);
1600 } else {
1601 ld = new_Instruction(func, OP_CVT, ty);
1602 }
1603 ld->setDef(0, lval);
1604 ld->setSrc(0, slot);
1605
1606 usei->bb->insertBefore(usei, ld);
1607 return lval;
1608 }
1609
1610
1611 // For each value that is to be spilled, go through all its definitions.
1612 // A value can have multiple definitions if it has been coalesced before.
1613 // For each definition, first go through all its uses and insert an unspill
1614 // instruction before it, then replace the use with the temporary register.
1615 // Unspill can be either a load from memory or simply a move to another
1616 // register file.
1617 // For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
1618 // if we have spilled to a memory location, or simply replace it with the
1619 // new register. No load or conversion instruction should be needed.
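// Rough sketch of the rewrite for one spilled value %v with slot l[0]
// (all names illustrative):
//   def:  %v = add ...       ->  %v = add ...   + st l[0], %v
//   use:  ... = mul %v, ...  ->  %t = ld l[0]   + ... = mul %t, ...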
1620 bool
1621 SpillCodeInserter::run(const std::list<ValuePair>& lst)
1622 {
1623 for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
1624 ++it) {
1625 LValue *lval = it->first->asLValue();
1626 Symbol *mem = it->second ? it->second->asSym() : NULL;
1627
1628 // Keep track of which instructions to delete later. Deleting them
1629 // inside the loop is unsafe since a single instruction may have
1630 // multiple destinations that all need to be spilled (like OP_SPLIT).
1631 unordered_set<Instruction *> to_del;
1632
1633 for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
1634 ++d) {
1635 Value *slot = mem ?
1636 static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
1637 Value *tmp = NULL;
1638 Instruction *last = NULL;
1639
1640 LValue *dval = (*d)->get()->asLValue();
1641 Instruction *defi = (*d)->getInsn();
1642
1643 // Unspill at each use *before* inserting spill instructions,
1644 // we don't want to have the spill instructions in the use list here.
1645 while (!dval->uses.empty()) {
1646 ValueRef *u = *dval->uses.begin();
1647 Instruction *usei = u->getInsn();
1648 assert(usei);
1649 if (usei->isPseudo()) {
1650 tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
1651 last = NULL;
1652 } else
1653 if (!last || usei != last->next) { // TODO: sort uses
1654 tmp = unspill(usei, dval, slot);
1655 last = usei;
1656 }
1657 u->set(tmp);
1658 }
1659
1660 assert(defi);
1661 if (defi->isPseudo()) {
1662 d = lval->defs.erase(d);
1663 --d;
1664 if (slot->reg.file == FILE_MEMORY_LOCAL)
1665 to_del.insert(defi);
1666 else
1667 defi->setDef(0, slot);
1668 } else {
1669 spill(defi, slot, dval);
1670 }
1671 }
1672
1673 for (unordered_set<Instruction *>::const_iterator it = to_del.begin();
1674 it != to_del.end(); ++it)
1675 delete_Instruction(func->getProgram(), *it);
1676 }
1677
1678 // TODO: We're not trying to reuse old slots in a potential next iteration.
1679 // We have to update the slots' livei intervals to be able to do that.
1680 stackBase = stackSize;
1681 slots.clear();
1682 return true;
1683 }
1684
1685 bool
1686 RegAlloc::exec()
1687 {
1688 for (IteratorRef it = prog->calls.iteratorDFS(false);
1689 !it->end(); it->next()) {
1690 func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));
1691
1692 func->tlsBase = prog->tlsSize;
1693 if (!execFunc())
1694 return false;
1695 prog->tlsSize += func->tlsSize;
1696 }
1697 return true;
1698 }
1699
1700 bool
1701 RegAlloc::execFunc()
1702 {
1703 InsertConstraintsPass insertConstr;
1704 PhiMovesPass insertPhiMoves;
1705 ArgumentMovesPass insertArgMoves;
1706 BuildIntervalsPass buildIntervals;
1707 SpillCodeInserter insertSpills(func);
1708
1709 GCRA gcra(func, insertSpills);
1710
1711 unsigned int i, retries;
1712 bool ret;
1713
1714 if (!func->ins.empty()) {
1715 // Insert a nop at the entry so inputs only used by the first instruction
1716 // don't count as having an empty live range.
1717 Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
1718 BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
1719 }
1720
1721 ret = insertConstr.exec(func);
1722 if (!ret)
1723 goto out;
1724
1725 ret = insertPhiMoves.run(func);
1726 if (!ret)
1727 goto out;
1728
1729 ret = insertArgMoves.run(func);
1730 if (!ret)
1731 goto out;
1732
1733 // TODO: need to fix up spill slot usage ranges to support > 1 retry
1734 for (retries = 0; retries < 3; ++retries) {
1735 if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
1736 INFO("Retry: %i\n", retries);
1737 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1738 func->print();
1739
1740 // spilling to registers may add live ranges, need to rebuild everything
1741 ret = true;
1742 for (sequence = func->cfg.nextSequence(), i = 0;
1743 ret && i <= func->loopNestingBound;
1744 sequence = func->cfg.nextSequence(), ++i)
1745 ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
1746 // reset marker
1747 for (ArrayList::Iterator bi = func->allBBlocks.iterator();
1748 !bi.end(); bi.next())
1749 BasicBlock::get(bi)->liveSet.marker = false;
1750 if (!ret)
1751 break;
1752 func->orderInstructions(this->insns);
1753
1754 ret = buildIntervals.run(func);
1755 if (!ret)
1756 break;
1757 ret = gcra.allocateRegisters(insns);
1758 if (ret)
1759 break; // success
1760 }
1761 INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);
1762
1763 func->tlsSize = insertSpills.getStackSize();
1764 out:
1765 return ret;
1766 }
1767
1768 // TODO: check if modifying Instruction::join here breaks anything
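// E.g. (illustrative): once "merge b64 %d { %a %b }" has been assigned $r4d,
// %a receives the id of $r4 and %b that of $r5; bytesToId() turns the byte
// offsets 16 and 20 back into ids of each value's own granularity.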
1769 void
1770 GCRA::resolveSplitsAndMerges()
1771 {
1772 for (std::list<Instruction *>::iterator it = splits.begin();
1773 it != splits.end();
1774 ++it) {
1775 Instruction *split = *it;
1776 unsigned int reg = regs.idToBytes(split->getSrc(0));
1777 for (int d = 0; split->defExists(d); ++d) {
1778 Value *v = split->getDef(d);
1779 v->reg.data.id = regs.bytesToId(v, reg);
1780 v->join = v;
1781 reg += v->reg.size;
1782 }
1783 }
1784 splits.clear();
1785
1786 for (std::list<Instruction *>::iterator it = merges.begin();
1787 it != merges.end();
1788 ++it) {
1789 Instruction *merge = *it;
1790 unsigned int reg = regs.idToBytes(merge->getDef(0));
1791 for (int s = 0; merge->srcExists(s); ++s) {
1792 Value *v = merge->getSrc(s);
1793 v->reg.data.id = regs.bytesToId(v, reg);
1794 v->join = v;
1795 // If the value is defined by a phi/union node, we also need to
1796 // perform the same fixup on that node's sources, since after RA
1797 // their registers should be identical.
1798 if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
1799 Instruction *phi = v->getInsn();
1800 for (int phis = 0; phi->srcExists(phis); ++phis)
1801 phi->getSrc(phis)->join = v;
1802 }
1803 reg += v->reg.size;
1804 }
1805 }
1806 merges.clear();
1807 }
1808
1809 bool Program::registerAllocation()
1810 {
1811 RegAlloc ra(this);
1812 return ra.exec();
1813 }
1814
1815 bool
1816 RegAlloc::InsertConstraintsPass::exec(Function *ir)
1817 {
1818 constrList.clear();
1819
1820 bool ret = run(ir, true, true);
1821 if (ret)
1822 ret = insertConstraintMoves();
1823 return ret;
1824 }
1825
1826 // TODO: make part of texture insn
1827 void
1828 RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
1829 {
1830 Value *def[4];
1831 int c, k, d;
1832 uint8_t mask = 0;
1833
1834 for (d = 0, k = 0, c = 0; c < 4; ++c) {
1835 if (!(tex->tex.mask & (1 << c)))
1836 continue;
1837 if (tex->getDef(k)->refCount()) {
1838 mask |= 1 << c;
1839 def[d++] = tex->getDef(k);
1840 }
1841 ++k;
1842 }
1843 tex->tex.mask = mask;
1844
1845 for (c = 0; c < d; ++c)
1846 tex->setDef(c, def[c]);
1847 for (; c < 4; ++c)
1848 tex->setDef(c, NULL);
1849 }
1850
1851 bool
1852 RegAlloc::InsertConstraintsPass::detectConflict(Instruction *cst, int s)
1853 {
1854 Value *v = cst->getSrc(s);
1855
1856 // current register allocation can't handle it if a value participates in
1857 // multiple constraints
1858 for (Value::UseIterator it = v->uses.begin(); it != v->uses.end(); ++it) {
1859 if (cst != (*it)->getInsn())
1860 return true;
1861 }
1862
1863 // can start at s + 1 because detectConflict is called on all sources
1864 for (int c = s + 1; cst->srcExists(c); ++c)
1865 if (v == cst->getSrc(c))
1866 return true;
1867
1868 Instruction *defi = v->getInsn();
1869
1870 return (!defi || defi->constrainedDefs());
1871 }
1872
1873 void
1874 RegAlloc::InsertConstraintsPass::addConstraint(Instruction *i, int s, int n)
1875 {
1876 Instruction *cst;
1877 int d;
1878
1879 // first, look for an existing identical constraint op
1880 for (std::list<Instruction *>::iterator it = constrList.begin();
1881 it != constrList.end();
1882 ++it) {
1883 cst = (*it);
1884 if (!i->bb->dominatedBy(cst->bb))
1885 break;
1886 for (d = 0; d < n; ++d)
1887 if (cst->getSrc(d) != i->getSrc(d + s))
1888 break;
1889 if (d >= n) {
1890 for (d = 0; d < n; ++d, ++s)
1891 i->setSrc(s, cst->getDef(d));
1892 return;
1893 }
1894 }
1895 cst = new_Instruction(func, OP_CONSTRAINT, i->dType);
1896
1897 for (d = 0; d < n; ++s, ++d) {
1898 cst->setDef(d, new_LValue(func, FILE_GPR));
1899 cst->setSrc(d, i->getSrc(s));
1900 i->setSrc(s, cst->getDef(d));
1901 }
1902 i->bb->insertBefore(i, cst);
1903
1904 constrList.push_back(cst);
1905 }
1906
1907 // Add a dummy use of the pointer source of >= 8 byte loads after the load
1908 // to prevent it from being assigned a register which overlaps the load's
1909 // destination, which would produce random corruptions.
1910 void
1911 RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
1912 {
1913 Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
1914 hzd->setSrc(0, src->get());
1915 i->bb->insertAfter(i, hzd);
1916
1917 }
1918
1919 // b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
1920 void
1921 RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
1922 {
1923 uint8_t size = 0;
1924 int n;
1925 for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n)
1926 size += insn->getDef(n)->reg.size;
1927 if (n < 2)
1928 return;
1929 LValue *lval = new_LValue(func, FILE_GPR);
1930 lval->reg.size = size;
1931
1932 Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
1933 split->setSrc(0, lval);
1934 for (int d = 0; d < n; ++d) {
1935 split->setDef(d, insn->getDef(d));
1936 insn->setDef(d, NULL);
1937 }
1938 insn->setDef(0, lval);
1939
1940 for (int k = 1, d = n; insn->defExists(d); ++d, ++k) {
1941 insn->setDef(k, insn->getDef(d));
1942 insn->setDef(d, NULL);
1943 }
1944 // carry over predicate if any (mainly for OP_UNION uses)
1945 split->setPredicate(insn->cc, insn->getPredicate());
1946
1947 insn->bb->insertAfter(insn, split);
1948 constrList.push_back(split);
1949 }
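// The source-side counterpart of condenseDefs(), e.g. (illustrative) for a
// 128-bit store:
//   st b128 g[...], { %a %b %c %d }
// becomes
//   %q = merge b128 { %a %b %c %d };  st b128 g[...], %q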
1950 void
1951 RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
1952 const int a, const int b)
1953 {
1954 uint8_t size = 0;
1955 if (a >= b)
1956 return;
1957 for (int s = a; s <= b; ++s)
1958 size += insn->getSrc(s)->reg.size;
1959 if (!size)
1960 return;
1961 LValue *lval = new_LValue(func, FILE_GPR);
1962 lval->reg.size = size;
1963
1964 Value *save[3];
1965 insn->takeExtraSources(0, save);
1966
1967 Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
1968 merge->setDef(0, lval);
1969 for (int s = a, i = 0; s <= b; ++s, ++i) {
1970 merge->setSrc(i, insn->getSrc(s));
1971 insn->setSrc(s, NULL);
1972 }
1973 insn->setSrc(a, lval);
1974
1975 for (int k = a + 1, s = b + 1; insn->srcExists(s); ++s, ++k) {
1976 insn->setSrc(k, insn->getSrc(s));
1977 insn->setSrc(s, NULL);
1978 }
1979 insn->bb->insertBefore(insn, merge);
1980
1981 insn->putExtraSources(0, save);
1982
1983 constrList.push_back(merge);
1984 }
1985
1986 void
1987 RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
1988 {
1989 int n, s;
1990
1991 if (isTextureOp(tex->op))
1992 textureMask(tex);
1993 condenseDefs(tex);
1994
1995 if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
1996 condenseSrcs(tex, 3, (3 + typeSizeof(tex->dType) / 4) - 1);
1997 } else
1998 if (isTextureOp(tex->op)) {
1999 if (tex->op != OP_TXQ) {
2000 s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
2001 if (tex->op == OP_TXD) {
2002 // Indirect handle belongs in the first arg
2003 if (tex->tex.rIndirectSrc >= 0)
2004 s++;
2005 if (!tex->tex.target.isArray() && tex->tex.useOffsets)
2006 s++;
2007 }
2008 n = tex->srcCount(0xff) - s;
2009 } else {
2010 s = tex->srcCount(0xff);
2011 n = 0;
2012 }
2013
2014 if (s > 1)
2015 condenseSrcs(tex, 0, s - 1);
2016 if (n > 1) // NOTE: first call modified positions already
2017 condenseSrcs(tex, 1, n);
2018 }
2019 }
2020
2021 void
2022 RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
2023 {
2024 if (isTextureOp(tex->op))
2025 textureMask(tex);
2026 condenseDefs(tex);
2027
2028 if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
2029 condenseSrcs(tex, 3, (3 + typeSizeof(tex->dType) / 4) - 1);
2030 } else
2031 if (isTextureOp(tex->op)) {
2032 int n = tex->srcCount(0xff, true);
2033 if (n > 4) {
2034 condenseSrcs(tex, 0, 3);
2035 if (n > 5) // NOTE: first call modified positions already
2036 condenseSrcs(tex, 4 - (4 - 1), n - 1 - (4 - 1));
2037 } else
2038 if (n > 1) {
2039 condenseSrcs(tex, 0, n - 1);
2040 }
2041 }
2042 }
2043
2044 void
2045 RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
2046 {
2047 int n, s;
2048
2049 textureMask(tex);
2050
2051 if (tex->op == OP_TXQ) {
2052 s = tex->srcCount(0xff);
2053 n = 0;
2054 } else {
2055 s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
2056 if (!tex->tex.target.isArray() &&
2057 (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
2058 ++s;
2059 if (tex->op == OP_TXD && tex->tex.useOffsets)
2060 ++s;
2061 n = tex->srcCount(0xff) - s;
2062 assert(n <= 4);
2063 }
2064
2065 if (s > 1)
2066 condenseSrcs(tex, 0, s - 1);
2067 if (n > 1) // NOTE: first call modified positions already
2068 condenseSrcs(tex, 1, n);
2069
2070 condenseDefs(tex);
2071 }
2072
2073 void
2074 RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
2075 {
2076 Value *pred = tex->getPredicate();
2077 if (pred)
2078 tex->setPredicate(tex->cc, NULL);
2079
2080 textureMask(tex);
2081
2082 assert(tex->defExists(0) && tex->srcExists(0));
2083 // make src and def count match
2084 int c;
2085 for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
2086 if (!tex->srcExists(c))
2087 tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
2088 if (!tex->defExists(c))
2089 tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
2090 }
2091 if (pred)
2092 tex->setPredicate(tex->cc, pred);
2093 condenseDefs(tex);
2094 condenseSrcs(tex, 0, c - 1);
2095 }
2096
2097 // Insert constraint markers for instructions whose multiple sources must be
2098 // located in consecutive registers.
2099 bool
2100 RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
2101 {
2102 TexInstruction *tex;
2103 Instruction *next;
2104 int s, size;
2105
2106 targ = bb->getProgram()->getTarget();
2107
2108 for (Instruction *i = bb->getEntry(); i; i = next) {
2109 next = i->next;
2110
2111 if ((tex = i->asTex())) {
2112 switch (targ->getChipset() & ~0xf) {
2113 case 0x50:
2114 case 0x80:
2115 case 0x90:
2116 case 0xa0:
2117 texConstraintNV50(tex);
2118 break;
2119 case 0xc0:
2120 case 0xd0:
2121 texConstraintNVC0(tex);
2122 break;
2123 case 0xe0:
2124 case 0xf0:
2125 case 0x100:
2126 texConstraintNVE0(tex);
2127 break;
2128 case 0x110:
2129 texConstraintGM107(tex);
2130 break;
2131 default:
2132 break;
2133 }
2134 } else
2135 if (i->op == OP_EXPORT || i->op == OP_STORE) {
2136 for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
2137 assert(i->srcExists(s));
2138 size -= i->getSrc(s)->reg.size;
2139 }
2140 condenseSrcs(i, 1, s - 1);
2141 } else
2142 if (i->op == OP_LOAD || i->op == OP_VFETCH) {
2143 condenseDefs(i);
2144 if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
2145 addHazard(i, i->src(0).getIndirect(0));
2146 if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
2147 addHazard(i, i->src(0).getIndirect(1));
2148 } else
2149 if (i->op == OP_UNION ||
2150 i->op == OP_MERGE ||
2151 i->op == OP_SPLIT) {
2152 constrList.push_back(i);
2153 }
2154 }
2155 return true;
2156 }
2157
2158 // Insert extra moves so that, if multiple register constraints on a value are
2159 // in conflict, these conflicts can be resolved.
2160 bool
2161 RegAlloc::InsertConstraintsPass::insertConstraintMoves()
2162 {
2163 for (std::list<Instruction *>::iterator it = constrList.begin();
2164 it != constrList.end();
2165 ++it) {
2166 Instruction *cst = *it;
2167 Instruction *mov;
2168
2169 if (cst->op == OP_SPLIT && 0) {
2170 // spilling splits is annoying, just make sure they're separate
2171 for (int d = 0; cst->defExists(d); ++d) {
2172 if (!cst->getDef(d)->refCount())
2173 continue;
2174 LValue *lval = new_LValue(func, cst->def(d).getFile());
2175 const uint8_t size = cst->def(d).getSize();
2176 lval->reg.size = size;
2177
2178 mov = new_Instruction(func, OP_MOV, typeOfSize(size));
2179 mov->setSrc(0, lval);
2180 mov->setDef(0, cst->getDef(d));
2181 cst->setDef(d, mov->getSrc(0));
2182 cst->bb->insertAfter(cst, mov);
2183
2184 cst->getSrc(0)->asLValue()->noSpill = 1;
2185 mov->getSrc(0)->asLValue()->noSpill = 1;
2186 }
2187 } else
2188 if (cst->op == OP_MERGE || cst->op == OP_UNION) {
2189 for (int s = 0; cst->srcExists(s); ++s) {
2190 const uint8_t size = cst->src(s).getSize();
2191
2192 if (!cst->getSrc(s)->defs.size()) {
2193 mov = new_Instruction(func, OP_NOP, typeOfSize(size));
2194 mov->setDef(0, cst->getSrc(s));
2195 cst->bb->insertBefore(cst, mov);
2196 continue;
2197 }
2198 assert(cst->getSrc(s)->defs.size() == 1); // still SSA
2199
2200 Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();
2201 // catch some cases where we don't really need MOVs
2202 if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs())
2203 continue;
2204
2205 LValue *lval = new_LValue(func, cst->src(s).getFile());
2206 lval->reg.size = size;
2207
2208 mov = new_Instruction(func, OP_MOV, typeOfSize(size));
2209 mov->setDef(0, lval);
2210 mov->setSrc(0, cst->getSrc(s));
2211 cst->setSrc(s, mov->getDef(0));
2212 cst->bb->insertBefore(cst, mov);
2213
2214 cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help
2215
2216 if (cst->op == OP_UNION)
2217 mov->setPredicate(defi->cc, defi->getPredicate());
2218 }
2219 }
2220 }
2221
2222 return true;
2223 }
2224
2225 } // namespace nv50_ir