78bc97f439772de26804ee3d84c08ad5d5f7fb3b
[mesa.git] / src / gallium / drivers / nouveau / codegen / nv50_ir_ra.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "codegen/nv50_ir.h"
24 #include "codegen/nv50_ir_target.h"
25
26 #include <stack>
27 #include <limits>
28 #include <tr1/unordered_set>
29
30 namespace nv50_ir {
31
32 #define MAX_REGISTER_FILE_SIZE 256
33
// Tracks which allocation units of each register file are occupied, and
// remembers the highest unit ever assigned per file (for reporting how many
// registers the program uses).
class RegisterSet
{
public:
   RegisterSet(const Target *);

   void init(const Target *);
   // Clear the occupancy bits of file f; optionally also reset the
   // high-water mark of assigned units.
   void reset(DataFile, bool resetMax = false);

   void periodicMask(DataFile f, uint32_t lock, uint32_t unlock);
   void intersect(DataFile f, const RegisterSet *);

   // Find a free range of 'size' units in f; on success store its start in
   // 'reg' and mark nothing (caller occupies separately if desired).
   bool assign(int32_t& reg, DataFile f, unsigned int size);
   void release(DataFile f, int32_t reg, unsigned int size);
   void occupy(DataFile f, int32_t reg, unsigned int size);
   void occupy(const Value *);
   void occupyMask(DataFile f, int32_t reg, uint8_t mask);
   bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
   bool testOccupy(const Value *);
   bool testOccupy(DataFile f, int32_t reg, unsigned int size);

   // Highest unit index ever occupied in file f (-1 if none).
   inline int getMaxAssigned(DataFile f) const { return fill[f]; }

   inline unsigned int getFileSize(DataFile f, uint8_t regSize) const
   {
      // on pre-nvc0 chips only half the GPR file is usable for 16-bit regs
      if (restrictedGPR16Range && f == FILE_GPR && regSize == 2)
         return (last[f] + 1) / 2;
      return last[f] + 1;
   }

   // Convert a size in bytes to allocation units of file f.
   inline unsigned int units(DataFile f, unsigned int size) const
   {
      return size >> unit[f];
   }
   // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
   inline unsigned int idToBytes(const Value *v) const
   {
      return v->reg.data.id * MIN2(v->reg.size, 4);
   }
   inline unsigned int idToUnits(const Value *v) const
   {
      return units(v->reg.file, idToBytes(v));
   }
   inline int bytesToId(Value *v, unsigned int bytes) const
   {
      if (v->reg.size < 4)
         return units(v->reg.file, bytes);
      return bytes / 4;
   }
   inline int unitsToId(DataFile f, int u, uint8_t size) const
   {
      if (u < 0)
         return -1;
      return (size < 4) ? u : ((u << unit[f]) / 4);
   }

   void print() const;

private:
   BitSet bits[LAST_REGISTER_FILE + 1]; // one bit per occupied unit

   int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity

   int last[LAST_REGISTER_FILE + 1]; // highest valid unit index per file
   int fill[LAST_REGISTER_FILE + 1]; // high-water mark of occupied units

   const bool restrictedGPR16Range; // set for chipsets < 0xc0 (see ctor)
};
101
102 void
103 RegisterSet::reset(DataFile f, bool resetMax)
104 {
105 bits[f].fill(0);
106 if (resetMax)
107 fill[f] = -1;
108 }
109
110 void
111 RegisterSet::init(const Target *targ)
112 {
113 for (unsigned int rf = 0; rf <= FILE_ADDRESS; ++rf) {
114 DataFile f = static_cast<DataFile>(rf);
115 last[rf] = targ->getFileSize(f) - 1;
116 unit[rf] = targ->getFileUnit(f);
117 fill[rf] = -1;
118 assert(last[rf] < MAX_REGISTER_FILE_SIZE);
119 bits[rf].allocate(last[rf] + 1, true);
120 }
121 }
122
123 RegisterSet::RegisterSet(const Target *targ)
124 : restrictedGPR16Range(targ->getChipset() < 0xc0)
125 {
126 init(targ);
127 for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
128 reset(static_cast<DataFile>(i));
129 }
130
// Apply a repeating 32-unit pattern to file f: set the 'lock' bits and
// clear the 'unlock' bits (delegates to BitSet::periodicMask32).
void
RegisterSet::periodicMask(DataFile f, uint32_t lock, uint32_t unlock)
{
   bits[f].periodicMask32(lock, unlock);
}
136
// OR the occupied bits of 'set' into this one; the resulting set of *free*
// registers is the intersection of both sets' free registers (hence the
// name).
void
RegisterSet::intersect(DataFile f, const RegisterSet *set)
{
   bits[f] |= set->bits[f];
}
142
// Dump the GPR occupancy bitmap (debug aid).
void
RegisterSet::print() const
{
   INFO("GPR:");
   bits[FILE_GPR].print();
   INFO("\n");
}
150
151 bool
152 RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size)
153 {
154 reg = bits[f].findFreeRange(size);
155 if (reg < 0)
156 return false;
157 fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
158 return true;
159 }
160
// Check whether any unit in [reg, reg + size) of file f is occupied.
bool
RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
{
   return bits[f].testRange(reg, size);
}
166
// Occupy the units covered by v's register (assumes v->reg.data.id has
// been assigned).
void
RegisterSet::occupy(const Value *v)
{
   occupy(v->reg.file, idToUnits(v), v->reg.size >> unit[v->reg.file]);
}
172
// Occupy the units selected by 'mask' within the aligned 32-unit word that
// contains 'reg', with the mask shifted to reg's position in that word.
void
RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
{
   bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
}
178
179 void
180 RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
181 {
182 bits[f].setRange(reg, size);
183
184 INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);
185
186 fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
187 }
188
// Like testOccupy(f, reg, size), taking file/position/size from v's
// register location.
bool
RegisterSet::testOccupy(const Value *v)
{
   return testOccupy(v->reg.file,
                     idToUnits(v), v->reg.size >> unit[v->reg.file]);
}
195
196 bool
197 RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
198 {
199 if (isOccupied(f, reg, size))
200 return false;
201 occupy(f, reg, size);
202 return true;
203 }
204
// Mark the units in [reg, reg + size) of file f as free again.
// Note: the high-water mark (fill) is intentionally left untouched.
void
RegisterSet::release(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].clrRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg release: %u[%i] %u\n", f, reg, size);
}
212
// Driver for register allocation: runs the preparation passes declared
// below over every function (see exec()/execFunc() implementations).
class RegAlloc
{
public:
   RegAlloc(Program *program) : prog(program), sequence(0) { }

   bool exec();
   bool execFunc();

private:
   // Inserts MOVs for the operands of phi instructions, adding new
   // predecessor blocks where needed (see the comment at visit()).
   class PhiMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
   };

   // Binds inputs/outputs/clobbers of OP_CALL instructions to the physical
   // registers used by the callee, inserting MOVs to resolve conflicts.
   class ArgumentMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
   };

   // Computes the live interval of every value.
   class BuildIntervalsPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      void collectLiveValues(BasicBlock *);
      void addLiveRange(Value *, const BasicBlock *, int end);
   };

   // Adds constraints and extra moves for instructions whose sources or
   // defs must end up in consecutive registers (tex, merge/split, ...).
   class InsertConstraintsPass : public Pass {
   public:
      bool exec(Function *func);
   private:
      virtual bool visit(BasicBlock *);

      bool insertConstraintMoves();

      void condenseDefs(Instruction *);
      void condenseSrcs(Instruction *, const int first, const int last);

      void addHazard(Instruction *i, const ValueRef *src);
      void textureMask(TexInstruction *);
      void addConstraint(Instruction *, int s, int n);
      bool detectConflict(Instruction *, int s);

      // target specific functions, TODO: put in subclass or Target
      void texConstraintNV50(TexInstruction *);
      void texConstraintNVC0(TexInstruction *);
      void texConstraintNVE0(TexInstruction *);
      void texConstraintGM107(TexInstruction *);

      std::list<Instruction *> constrList;

      const Target *targ;
   };

   // Recursively computes the live-in set of bb (defined further below).
   bool buildLiveSets(BasicBlock *);

private:
   Program *prog;
   Function *func;

   // instructions in control flow / chronological order
   ArrayList insns;

   int sequence; // for manual passes through CFG
};
278
// Pair of values, used for the spill list (see GCRA::mustSpill).
typedef std::pair<Value *, Value *> ValuePair;

// Rewrites the program to store (spill) values to and load (unspill)
// values from stack slots, managing the slot assignments.
class SpillCodeInserter
{
public:
   SpillCodeInserter(Function *fn) : func(fn), stackSize(0), stackBase(0) { }

   // Insert spill/unspill code for every value pair in the given list.
   bool run(const std::list<ValuePair>&);

   Symbol *assignSlot(const Interval&, const unsigned int size);
   Value *offsetSlot(Value *, const LValue *);
   inline int32_t getStackSize() const { return stackSize; }

private:
   Function *func;

   // A stack slot; may be shared by several values whose live intervals
   // do not overlap.
   struct SpillSlot
   {
      Interval occup;
      std::list<Value *> residents; // needed to recalculate occup
      Symbol *sym;
      int32_t offset;
      inline uint8_t size() const { return sym->reg.size; }
   };
   std::list<SpillSlot> slots;
   int32_t stackSize;
   int32_t stackBase;

   LValue *unspill(Instruction *usei, LValue *, Value *slot);
   void spill(Instruction *defi, Value *slot, LValue *);
};
310
// Extend val's live interval by [begin, end), where begin is the serial of
// the value's defining instruction if that lies inside bb, and the block
// entry otherwise (value is live-in).
void
RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
                                           const BasicBlock *bb,
                                           int end)
{
   Instruction *insn = val->getUniqueInsn();

   if (!insn)
      insn = bb->getFirst();

   assert(bb->getFirst()->serial <= bb->getExit()->serial);
   assert(bb->getExit()->serial + 1 >= end);

   int begin = insn->serial;
   // defining instruction outside this block -> live from the entry on
   if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
      begin = bb->getEntry()->serial;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
            val->id, begin, insn->serial, end);

   if (begin != end) // empty ranges are only added as hazards for fixed regs
      val->livei.extend(begin, end);
}
334
335 bool
336 RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
337 {
338 if (b->cfg.incidentCount() <= 1)
339 return false;
340
341 int n = 0;
342 for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
343 if (ei.getType() == Graph::Edge::TREE ||
344 ei.getType() == Graph::Edge::FORWARD)
345 ++n;
346 return (n == 2);
347 }
348
// For each operand of each PHI in b, generate a new value by inserting a MOV
// at the end of the block it is coming from and replace the operand with its
// result. This eliminates liveness conflicts and enables us to let values be
// copied to the right register if such a conflict exists nonetheless.
//
// These MOVs are also crucial in making sure the live intervals of phi srces
// are extended until the end of the loop, since they are not included in the
// live-in sets.
bool
RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
{
   Instruction *phi, *mov;
   BasicBlock *pb, *pn;

   std::stack<BasicBlock *> stack;

   // collect predecessors whose edge into bb is critical
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      pb = BasicBlock::get(ei.getNode());
      assert(pb);
      if (needNewElseBlock(bb, pb))
         stack.push(pb);
   }
   // split each critical edge by inserting a fresh block pn between pb and
   // bb, and retarget pb's branch accordingly
   while (!stack.empty()) {
      pb = stack.top();
      pn = new BasicBlock(func);
      stack.pop();

      pb->cfg.detach(&bb->cfg);
      pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
      pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);

      assert(pb->getExit()->op != OP_CALL);
      if (pb->getExit()->asFlow()->target.bb == bb)
         pb->getExit()->asFlow()->target.bb = pn;
   }

   // insert MOVs (phi->src(j) should stem from j-th in-BB)
   int j = 0;
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      pb = BasicBlock::get(ei.getNode());
      if (!pb->isTerminated())
         pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));

      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
         mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));

         // tmp = phi->src(j) at the end of predecessor pb,
         // and the phi now reads tmp instead
         mov->setSrc(0, phi->getSrc(j));
         mov->setDef(0, tmp);
         phi->setSrc(j, tmp);

         pb->insertBefore(pb->getExit(), mov);
      }
      ++j;
   }

   return true;
}
407
bool
RegAlloc::ArgumentMovesPass::visit(BasicBlock *bb)
{
   // Bind function call inputs/outputs to the same physical register
   // the callee uses, inserting moves as appropriate for the case a
   // conflict arises.
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      FlowInstruction *cal = i->asFlow();
      // TODO: Handle indirect calls.
      // Right now they should only be generated for builtins.
      if (!cal || cal->op != OP_CALL || cal->builtin || cal->indirect)
         continue;
      RegisterSet clobberSet(prog->getTarget());

      // Bind input values: copy each argument into a temporary fixed to
      // the register of the callee's corresponding input.
      for (int s = cal->indirect ? 1 : 0; cal->srcExists(s); ++s) {
         const int t = cal->indirect ? (s - 1) : s;
         LValue *tmp = new_LValue(func, cal->getSrc(s)->asLValue());
         tmp->reg.data.id = cal->target.fn->ins[t].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setDef(0, tmp);
         mov->setSrc(0, cal->getSrc(s));
         cal->setSrc(s, tmp);

         bb->insertBefore(cal, mov);
      }

      // Bind output values: the call defines temporaries fixed to the
      // callee's output registers, copied to the original defs afterwards.
      for (int d = 0; cal->defExists(d); ++d) {
         LValue *tmp = new_LValue(func, cal->getDef(d)->asLValue());
         tmp->reg.data.id = cal->target.fn->outs[d].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setSrc(0, tmp);
         mov->setDef(0, cal->getDef(d));
         cal->setDef(d, tmp);

         bb->insertAfter(cal, mov);
         clobberSet.occupy(tmp);
      }

      // Bind clobbered values: add dummy defs for registers the callee
      // clobbers that are not already covered by the outputs above.
      for (std::deque<Value *>::iterator it = cal->target.fn->clobbers.begin();
           it != cal->target.fn->clobbers.end();
           ++it) {
         if (clobberSet.testOccupy(*it)) {
            Value *tmp = new_LValue(func, (*it)->asLValue());
            tmp->reg.data.id = (*it)->reg.data.id;
            cal->setDef(cal->defCount(), tmp);
         }
      }
   }

   // Update the clobber set of the function.
   if (BasicBlock::get(func->cfgExit) == bb) {
      func->buildDefSets();
      for (unsigned int i = 0; i < bb->defSet.getSize(); ++i)
         if (bb->defSet.test(i))
            func->clobbers.push_back(func->getLValue(i));
   }

   return true;
}
474
// Build the set of live-in variables of bb.
// Works depth-first over the CFG: live-in(bb) = union of successors'
// live-ins, minus values defined in bb, plus values used in bb (plus the
// function outputs for the exit block).
bool
RegAlloc::buildLiveSets(BasicBlock *bb)
{
   Function *f = bb->getFunction();
   BasicBlock *bn;
   Instruction *i;
   unsigned int s, d;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());

   bb->liveSet.allocate(func->allLValues.getSize(), false);

   // gather the union of the successors' live-in sets
   int n = 0;
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      bn = BasicBlock::get(ei.getNode());
      if (bn == bb)
         continue;
      if (bn->cfg.visit(sequence)) // not visited yet in this sequence
         if (!buildLiveSets(bn))
            return false;
      // the first contribution can be assigned, later ones are OR'ed in;
      // the marker means bb already holds results from an earlier visit
      if (n++ || bb->liveSet.marker)
         bb->liveSet |= bn->liveSet;
      else
         bb->liveSet = bn->liveSet;
   }
   if (!n && !bb->liveSet.marker)
      bb->liveSet.fill(0);
   bb->liveSet.marker = true;

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set of out blocks:\n", bb->getId());
      bb->liveSet.print();
   }

   // if (!bb->getEntry())
   //   return true;

   // function outputs are live at the exit block
   if (bb == BasicBlock::get(f->cfgExit)) {
      for (std::deque<ValueRef>::iterator it = f->outs.begin();
           it != f->outs.end(); ++it) {
         assert(it->get()->asLValue());
         bb->liveSet.set(it->get()->id);
      }
   }

   // walk the block backwards: defs kill liveness, uses create it
   for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
      for (d = 0; i->defExists(d); ++d)
         bb->liveSet.clr(i->getDef(d)->id);
      for (s = 0; i->srcExists(s); ++s)
         if (i->getSrc(s)->asLValue())
            bb->liveSet.set(i->getSrc(s)->id);
   }
   // phi defs are not live-in either
   for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
      bb->liveSet.clr(i->getDef(0)->id);

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set after propagation:\n", bb->getId());
      bb->liveSet.print();
   }

   return true;
}
538
// Initialize bb->liveSet as the union of the live-in sets of all non-DUMMY
// successors; a block with no successors (but reachable) gets an empty set.
void
RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
{
   BasicBlock *bbA = NULL, *bbB = NULL;

   if (bb->cfg.outgoingCount()) {
      // trickery to save a loop of OR'ing liveSets
      // aliasing works fine with BitSet::setOr
      // (bbB trails one edge behind; from the third edge on the partial
      // union is accumulated in bb's own liveSet, with bbA == bb)
      for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
         if (ei.getType() == Graph::Edge::DUMMY)
            continue;
         if (bbA) {
            bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
            bbA = bb;
         } else {
            bbA = bbB;
         }
         bbB = BasicBlock::get(ei.getNode());
      }
      // final OR; bbA is still NULL when there was only a single successor
      // NOTE(review): assumes at least one non-DUMMY edge exists whenever
      // outgoingCount() > 0, otherwise bbB would still be NULL here
      bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
   } else
   if (bb->cfg.incidentCount()) {
      bb->liveSet.fill(0);
   }
}
564
// Build live intervals for all values used in bb: starting from the
// live-outs, scan the block backwards and record ranges from each value's
// definition (or the block entry) to its last use.
bool
RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
{
   collectLiveValues(bb);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());

   // go through out blocks and delete phi sources that do not originate from
   // the current block from the live set
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      BasicBlock *out = BasicBlock::get(ei.getNode());

      for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
         bb->liveSet.clr(i->getDef(0)->id);

         for (int s = 0; i->srcExists(s); ++s) {
            assert(i->src(s).getInsn());
            if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
               bb->liveSet.set(i->getSrc(s)->id);
            else
               bb->liveSet.clr(i->getSrc(s)->id);
         }
      }
   }

   // remaining live-outs are live until end
   if (bb->getExit()) {
      for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
         if (bb->liveSet.test(j))
            addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
   }

   // backwards scan: a def ends liveness, the last use (seen first here)
   // closes a range
   for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
      for (int d = 0; i->defExists(d); ++d) {
         bb->liveSet.clr(i->getDef(d)->id);
         if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
            i->getDef(d)->livei.extend(i->serial, i->serial);
      }

      for (int s = 0; i->srcExists(s); ++s) {
         if (!i->getSrc(s)->asLValue())
            continue;
         if (!bb->liveSet.test(i->getSrc(s)->id)) {
            bb->liveSet.set(i->getSrc(s)->id);
            addLiveRange(i->getSrc(s), bb, i->serial);
         }
      }
   }

   // hazard ranges for fixed-register function inputs in the entry block
   if (bb == BasicBlock::get(func->cfg.getRoot())) {
      for (std::deque<ValueDef>::iterator it = func->ins.begin();
           it != func->ins.end(); ++it) {
         if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
            it->get()->livei.extend(0, 1);
      }
   }

   return true;
}
624
625
// Selects which kinds of instructions doCoalesce() tries to join values
// across.
#define JOIN_MASK_PHI (1 << 0)
#define JOIN_MASK_UNION (1 << 1)
#define JOIN_MASK_MOV (1 << 2)
#define JOIN_MASK_TEX (1 << 3)

// Graph-colouring register allocator (Chaitin/Briggs style):
// coalesce -> build interference graph -> simplify -> select.
class GCRA
{
public:
   GCRA(Function *, SpillCodeInserter&);
   ~GCRA();

   // Run register allocation over the given instruction list.
   bool allocateRegisters(ArrayList& insns);

   void printNodeInfo() const;

private:
   // One node of the register interference graph per coalesced value.
   class RIG_Node : public Graph::Node
   {
   public:
      RIG_Node();

      void init(const RegisterSet&, LValue *);

      void addInterference(RIG_Node *);
      void addRegPreference(RIG_Node *);

      inline LValue *getValue() const
      {
         return reinterpret_cast<LValue *>(data);
      }
      inline void setValue(LValue *lval) { data = lval; }

      // mask of the units this node covers within its aligned 8-unit group
      inline uint8_t getCompMask() const
      {
         return ((1 << colors) - 1) << (reg & 7);
      }

      static inline RIG_Node *get(const Graph::EdgeIterator& ei)
      {
         return static_cast<RIG_Node *>(ei.getNode());
      }

   public:
      uint32_t degree;
      uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
      uint16_t colors; // number of register units the value occupies

      DataFile f;
      int32_t reg; // assigned register unit, or -1

      float weight; // spill weight (see calculateSpillWeights)

      // list pointers for simplify() phase
      RIG_Node *next;
      RIG_Node *prev;

      // union of the live intervals of all coalesced values (we want to retain
      // the separate intervals for testing interference of compound values)
      Interval livei;

      std::list<RIG_Node *> prefRegs; // nodes we'd prefer to share a reg with
   };

private:
   inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }

   void buildRIG(ArrayList&);
   bool coalesce(ArrayList&);
   bool doCoalesce(ArrayList&, unsigned int mask);
   void calculateSpillWeights();
   void simplify();
   bool selectRegisters();
   void cleanup(const bool success);

   void simplifyEdge(RIG_Node *, RIG_Node *);
   void simplifyNode(RIG_Node *);

   bool coalesceValues(Value *, Value *, bool force);
   void resolveSplitsAndMerges();
   void makeCompound(Instruction *, bool isSplit);

   inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);

   inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
   void checkList(std::list<RIG_Node *>&);

private:
   std::stack<uint32_t> stack; // value ids in simplification order

   // list headers for simplify() phase
   RIG_Node lo[2];
   RIG_Node hi;

   Graph RIG;
   RIG_Node *nodes; // indexed by LValue id
   unsigned int nodeCount;

   Function *func;
   Program *prog;

   // relDegree[i][j]: colors a node of size i takes from a node of size j
   static uint8_t relDegree[17][17];

   RegisterSet regs;

   // need to fixup register id for participants of OP_MERGE/SPLIT
   std::list<Instruction *> merges;
   std::list<Instruction *> splits;

   SpillCodeInserter& spill;
   std::list<ValuePair> mustSpill;
};

uint8_t GCRA::relDegree[17][17];
739
740 GCRA::RIG_Node::RIG_Node() : Node(NULL), next(this), prev(this)
741 {
742 colors = 0;
743 }
744
// Dump degree/weight/interference information of all RIG nodes (debug aid).
void
GCRA::printNodeInfo() const
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      if (!nodes[i].colors) // node does not represent a value
         continue;
      INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
           i,
           nodes[i].f,nodes[i].reg,nodes[i].colors,
           nodes[i].weight,
           nodes[i].degree, nodes[i].degreeLimit);

      // list all interference neighbours (both edge directions)
      for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      INFO("\n");
   }
}
764
// Initialize a RIG node from an lvalue: size in register units (colors),
// file, pre-assigned register if fixed, live interval, and the degree limit
// below which the node is trivially colourable.
void
GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
{
   setValue(lval);
   // fixed-register values must neither be spilled nor be moved
   if (lval->reg.data.id >= 0)
      lval->noSpill = lval->fixedReg = 1;

   colors = regs.units(lval->reg.file, lval->reg.size);
   f = lval->reg.file;
   reg = -1;
   if (lval->reg.data.id >= 0)
      reg = regs.idToUnits(lval);

   weight = std::numeric_limits<float>::infinity();
   degree = 0;
   degreeLimit = regs.getFileSize(f, lval->reg.size);
   degreeLimit -= relDegree[1][colors] - 1;

   livei.insert(lval->livei);
}
785
// Try to coalesce the representatives of dst and src so they are assigned
// the same register. Without 'force', fails (returning false, no changes)
// on mismatching files/sizes, conflicting fixed registers or overlapping
// live intervals; 'force' is used where the ISA dictates placement
// (MERGE/SPLIT/TEX operands).
bool
GCRA::coalesceValues(Value *dst, Value *src, bool force)
{
   LValue *rep = dst->join->asLValue();
   LValue *val = src->join->asLValue();

   // keep a fixed-register value as the representative
   if (!force && val->reg.data.id >= 0) {
      rep = src->join->asLValue();
      val = dst->join->asLValue();
   }
   RIG_Node *nRep = &nodes[rep->id];
   RIG_Node *nVal = &nodes[val->id];

   if (src->reg.file != dst->reg.file) {
      if (!force)
         return false;
      WARN("forced coalescing of values in different files !\n");
   }
   if (!force && dst->reg.size != src->reg.size)
      return false;

   if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
      if (force) {
         if (val->reg.data.id >= 0)
            WARN("forced coalescing of values in different fixed regs !\n");
      } else {
         if (val->reg.data.id >= 0)
            return false;
         // make sure that there is no overlap with the fixed register of rep
         for (ArrayList::Iterator it = func->allLValues.iterator();
              !it.end(); it.next()) {
            Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
            assert(reg);
            if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
               return false;
         }
      }
   }

   if (!force && nRep->livei.overlaps(nVal->livei))
      return false;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
            rep->id, rep->reg.data.id, val->id);

   // set join pointer of all values joined with val
   for (Value::DefIterator def = val->defs.begin(); def != val->defs.end();
        ++def)
      (*def)->get()->join = rep;
   assert(rep->join == rep && val->join == rep);

   // add val's definitions to rep and extend the live interval of its RIG node
   rep->defs.insert(rep->defs.end(), val->defs.begin(), val->defs.end());
   nRep->livei.unify(nVal->livei);
   return true;
}
842
843 bool
844 GCRA::coalesce(ArrayList& insns)
845 {
846 bool ret = doCoalesce(insns, JOIN_MASK_PHI);
847 if (!ret)
848 return false;
849 switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
850 case 0x50:
851 case 0x80:
852 case 0x90:
853 case 0xa0:
854 ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
855 break;
856 case 0xc0:
857 case 0xd0:
858 case 0xe0:
859 case 0xf0:
860 case 0x100:
861 case 0x110:
862 ret = doCoalesce(insns, JOIN_MASK_UNION);
863 break;
864 default:
865 break;
866 }
867 if (!ret)
868 return false;
869 return doCoalesce(insns, JOIN_MASK_MOV);
870 }
871
// Build the 8-bit component mask for one part of a compound value: the part
// spans 'size' units starting at 'base' inside a compound of 'compSize'
// units. For small compounds the mask is replicated so it covers every
// aligned position within an 8-unit group the part could land on.
static inline uint8_t makeCompMask(int compSize, int base, int size)
{
   uint8_t m = ((1 << size) - 1) << base;

   if (compSize == 1)
      return 0xff; // a 1-unit value can go anywhere
   if (compSize == 2) {
      m |= (m << 2);
      return (m << 4) | m;
   }
   if (compSize == 3 || compSize == 4)
      return (m << 4) | m;
   assert(compSize <= 8);
   return m;
}
890
891 // Used when coalescing moves. The non-compound value will become one, e.g.:
892 // mov b32 $r0 $r2 / merge b64 $r0d { $r0 $r1 }
893 // split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
894 static inline void copyCompound(Value *dst, Value *src)
895 {
896 LValue *ldst = dst->asLValue();
897 LValue *lsrc = src->asLValue();
898
899 if (ldst->compound && !lsrc->compound) {
900 LValue *swap = lsrc;
901 lsrc = ldst;
902 ldst = swap;
903 }
904
905 ldst->compound = lsrc->compound;
906 ldst->compMask = lsrc->compMask;
907 }
908
// Mark a MERGE's sources (or a SPLIT's defs) as parts of a compound value
// and record in each part's compMask which units of the compound it may
// occupy, based on its offset and size.
void
GCRA::makeCompound(Instruction *insn, bool split)
{
   // the representative is the whole value being split/merged
   LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("makeCompound(split = %i): ", split);
      insn->print();
   }

   const unsigned int size = getNode(rep)->colors;
   unsigned int base = 0;

   if (!rep->compound)
      rep->compMask = 0xff;
   rep->compound = 1;

   // walk the parts in order, assigning each the mask for its offset
   for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
      LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();

      val->compound = 1;
      if (!val->compMask)
         val->compMask = 0xff;
      val->compMask &= makeCompMask(size, base, getNode(val)->colors);
      assert(val->compMask);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
               rep->id, rep->compMask, val->id, val->compMask);

      base += getNode(val)->colors;
   }
   assert(base == size);
}
942
// Walk all instructions and coalesce values according to 'mask' (a
// combination of the JOIN_MASK_* flags). Returns false only when phi
// operands could not be coalesced, which is fatal.
bool
GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
{
   int c, n;

   for (n = 0; n < insns.getSize(); ++n) {
      Instruction *i;
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));

      switch (insn->op) {
      case OP_PHI:
         if (!(mask & JOIN_MASK_PHI))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
               // this is bad
               ERROR("failed to coalesce phi operands\n");
               return false;
            }
         break;
      case OP_UNION:
      case OP_MERGE:
         if (!(mask & JOIN_MASK_UNION))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            coalesceValues(insn->getDef(0), insn->getSrc(c), true);
         if (insn->op == OP_MERGE) {
            // remember merges for the later register-id fixup
            merges.push_back(insn);
            if (insn->srcExists(1))
               makeCompound(insn, false);
         }
         break;
      case OP_SPLIT:
         if (!(mask & JOIN_MASK_UNION))
            break;
         splits.push_back(insn);
         for (c = 0; insn->defExists(c); ++c)
            coalesceValues(insn->getSrc(0), insn->getDef(c), true);
         makeCompound(insn, true);
         break;
      case OP_MOV:
         if (!(mask & JOIN_MASK_MOV))
            break;
         i = NULL;
         if (!insn->getDef(0)->uses.empty())
            i = (*insn->getDef(0)->uses.begin())->getInsn();
         // if this is a constraint-move there will only be a single use
         if (i && i->op == OP_MERGE) // do we really still need this ?
            break;
         i = insn->getSrc(0)->getUniqueInsn();
         if (i && !i->constrainedDefs()) {
            // join src and def; propagate compound info on success
            if (coalesceValues(insn->getDef(0), insn->getSrc(0), false))
               copyCompound(insn->getSrc(0), insn->getDef(0));
         }
         break;
      case OP_TEX:
      case OP_TXB:
      case OP_TXL:
      case OP_TXF:
      case OP_TXQ:
      case OP_TXD:
      case OP_TXG:
      case OP_TXLQ:
      case OP_TEXCSAA:
      case OP_TEXPREP:
         if (!(mask & JOIN_MASK_TEX))
            break;
         // force tex defs into the same registers as the sources
         // (the predicate source, if any, is excluded)
         for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
            coalesceValues(insn->getDef(c), insn->getSrc(c), true);
         break;
      default:
         break;
      }
   }
   return true;
}
1019
// Record an interference edge between this node and 'node'.
// The degree contribution depends on both nodes' sizes: relDegree[i][j] is
// the number of colors a node of size i takes away from one of size j.
void
GCRA::RIG_Node::addInterference(RIG_Node *node)
{
   this->degree += relDegree[node->colors][colors];
   node->degree += relDegree[colors][node->colors];

   this->attach(node, Graph::Edge::CROSS);
}
1028
// Remember 'node' as a register preference: we would like to end up in the
// same register as it (a hint, not a hard constraint).
void
GCRA::RIG_Node::addRegPreference(RIG_Node *node)
{
   prefRegs.push_back(node);
}
1034
1035 GCRA::GCRA(Function *fn, SpillCodeInserter& spill) :
1036 func(fn),
1037 regs(fn->getProgram()->getTarget()),
1038 spill(spill)
1039 {
1040 prog = func->getProgram();
1041
1042 // initialize relative degrees array - i takes away from j
1043 for (int i = 1; i <= 16; ++i)
1044 for (int j = 1; j <= 16; ++j)
1045 relDegree[i][j] = j * ((i + j - 1) / j);
1046 }
1047
1048 GCRA::~GCRA()
1049 {
1050 if (nodes)
1051 delete[] nodes;
1052 }
1053
1054 void
1055 GCRA::checkList(std::list<RIG_Node *>& lst)
1056 {
1057 GCRA::RIG_Node *prev = NULL;
1058
1059 for (std::list<RIG_Node *>::iterator it = lst.begin();
1060 it != lst.end();
1061 ++it) {
1062 assert((*it)->getValue()->join == (*it)->getValue());
1063 if (prev)
1064 assert(prev->livei.begin() <= (*it)->livei.begin());
1065 prev = *it;
1066 }
1067 }
1068
// Insert 'node' into 'list', keeping the list ordered by ascending live
// interval start. Nodes with an empty interval are not inserted at all.
// Scanning starts at the tail because input arrives mostly in order.
void
GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
{
   if (node->livei.isEmpty())
      return;
   // only the intervals of joined values don't necessarily arrive in order
   std::list<RIG_Node *>::iterator prev, it;
   for (it = list.end(); it != list.begin(); it = prev) {
      prev = it;
      --prev;
      // stop after the last node that starts no later than 'node'
      if ((*prev)->livei.begin() <= node->livei.begin())
         break;
   }
   list.insert(it, node);
}
1084
// Build the register interference graph: gather all representative values
// ordered by live interval start, then sweep over them with an active list;
// overlapping intervals within the same register file interfere.
void
GCRA::buildRIG(ArrayList& insns)
{
   std::list<RIG_Node *> values, active;

   // function inputs participate too
   for (std::deque<ValueDef>::iterator it = func->ins.begin();
        it != func->ins.end(); ++it)
      insertOrderedTail(values, getNode(it->get()->asLValue()));

   // add every value that is its own representative (not joined into another)
   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
      for (int d = 0; insn->defExists(d); ++d)
         if (insn->getDef(d)->rep() == insn->getDef(d))
            insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
   }
   checkList(values);

   while (!values.empty()) {
      RIG_Node *cur = values.front();

      for (std::list<RIG_Node *>::iterator it = active.begin();
           it != active.end();) {
         RIG_Node *node = *it;

         // retire nodes whose interval ended before cur starts
         if (node->livei.end() <= cur->livei.begin()) {
            it = active.erase(it);
         } else {
            if (node->f == cur->f && node->livei.overlaps(cur->livei))
               cur->addInterference(node);
            ++it;
         }
      }
      values.pop_front();
      active.push_back(cur);
   }
}
1121
// Compute each node's spill weight (refcount^2 / live range extent; lower
// means cheaper to spill) and distribute the nodes onto the simplify()
// work lists: lo[0]/lo[1] for trivially colourable nodes (values of size
// <= 4 / > 4 bytes), hi for potential spill candidates.
void
GCRA::calculateSpillWeights()
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      RIG_Node *const n = &nodes[i];
      if (!nodes[i].colors || nodes[i].livei.isEmpty())
         continue;
      if (nodes[i].reg >= 0) {
         // update max reg
         regs.occupy(n->f, n->reg, n->colors);
         continue;
      }
      LValue *val = nodes[i].getValue();

      // weight stays +inf for noSpill values so they are never selected
      if (!val->noSpill) {
         int rc = 0;
         for (Value::DefIterator it = val->defs.begin();
              it != val->defs.end();
              ++it)
            rc += (*it)->get()->refCount();

         nodes[i].weight =
            (float)rc * (float)rc / (float)nodes[i].livei.extent();
      }

      if (nodes[i].degree < nodes[i].degreeLimit) {
         int l = 0;
         if (val->reg.size > 4)
            l = 1;
         DLLIST_ADDHEAD(&lo[l], &nodes[i]);
      } else {
         DLLIST_ADDHEAD(&hi, &nodes[i]);
      }
   }
   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      printNodeInfo();
}
1159
// Account for node a being removed from the interference graph: reduce b's
// effective degree and, if b just became trivially colorable, move it from
// the hi list to the appropriate lo list.
void
GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
{
   bool move = b->degree >= b->degreeLimit; // b currently on the hi list

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
            a->getValue()->id, a->degree, a->degreeLimit,
            b->getValue()->id, b->degree, b->degreeLimit);

   // relDegree[x][y] gives the degree contribution of an x-color node to a
   // y-color node
   b->degree -= relDegree[a->colors][b->colors];

   // move only if b crossed below its limit with this update
   move = move && b->degree < b->degreeLimit;
   if (move && !DLLIST_EMPTY(b)) {
      int l = (b->getValue()->reg.size > 4) ? 1 : 0;
      DLLIST_DEL(b);
      DLLIST_ADDTAIL(&lo[l], b);
   }
}
1179
1180 void
1181 GCRA::simplifyNode(RIG_Node *node)
1182 {
1183 for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
1184 simplifyEdge(node, RIG_Node::get(ei));
1185
1186 for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
1187 simplifyEdge(node, RIG_Node::get(ei));
1188
1189 DLLIST_DEL(node);
1190 stack.push(node->getValue()->id);
1191
1192 INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
1193 node->getValue()->id,
1194 (node->degree < node->degreeLimit) ? "" : "(spill)");
1195 }
1196
// Chaitin/Briggs-style simplification: repeatedly remove trivially colorable
// nodes (small values first), and when only constrained nodes remain,
// optimistically push the cheapest spill candidate.
void
GCRA::simplify()
{
   for (;;) {
      if (!DLLIST_EMPTY(&lo[0])) {
         // drain all small (<= 4 byte) trivially colorable nodes first
         do {
            simplifyNode(lo[0].next);
         } while (!DLLIST_EMPTY(&lo[0]));
      } else
      if (!DLLIST_EMPTY(&lo[1])) {
         // remove one large node, then re-check lo[0]: its neighbours may
         // have become trivially colorable
         simplifyNode(lo[1].next);
      } else
      if (!DLLIST_EMPTY(&hi)) {
         RIG_Node *best = hi.next;
         float bestScore = best->weight / (float)best->degree;
         // spill candidate
         for (RIG_Node *it = best->next; it != &hi; it = it->next) {
            float score = it->weight / (float)it->degree;
            if (score < bestScore) {
               best = it;
               bestScore = score;
            }
         }
         // an infinite score means even the best candidate cannot be
         // spilled (weight left at +inf / degree 0)
         if (isinf(bestScore)) {
            ERROR("no viable spill candidates left\n");
            break;
         }
         simplifyNode(best);
      } else {
         break;
      }
   }
}
1230
// Mark the registers of an already colored neighbour of node as occupied,
// taking compound (register tuple) component masks into account.
void
GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
{
   const RIG_Node *intf = RIG_Node::get(ei);

   // uncolored neighbours don't constrain us yet
   if (intf->reg < 0)
      return;
   const LValue *vA = node->getValue();
   const LValue *vB = intf->getValue();

   // mask of the units intf occupies within its aligned group of 8 units
   const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);

   if (vA->compound | vB->compound) {
      // NOTE: this only works for >aligned< register tuples !
      // compare the coalesced members pairwise: only components whose live
      // ranges actually overlap need to be blocked
      for (Value::DefCIterator D = vA->defs.begin(); D != vA->defs.end(); ++D) {
         for (Value::DefCIterator d = vB->defs.begin(); d != vB->defs.end(); ++d) {
            const LValue *vD = (*D)->get()->asLValue();
            const LValue *vd = (*d)->get()->asLValue();

            if (!vD->livei.overlaps(vd->livei)) {
               INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
                        vD->id, vd->id);
               continue;
            }

            // restrict the blocked units to the conflicting components
            uint8_t mask = vD->compound ? vD->compMask : ~0;
            if (vd->compound) {
               assert(vB->compound);
               mask &= vd->compMask & vB->compMask;
            } else {
               mask &= intfMask;
            }

            INFO_DBG(prog->dbgFlags, REG_ALLOC,
                     "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
                     vD->id,
                     vD->compound ? vD->compMask : 0xff,
                     vd->id,
                     vd->compound ? vd->compMask : intfMask,
                     vB->compMask, intf->reg & ~7, mask);
            if (mask)
               regs.occupyMask(node->f, intf->reg & ~7, mask);
         }
      }
   } else {
      // simple case: block the neighbour's full register range
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "(%%%i) X (%%%i): $r%i + %u\n",
               vA->id, vB->id, intf->reg, intf->colors);
      regs.occupy(node->f, intf->reg, intf->colors);
   }
}
1282
// SELECT phase: pop nodes off the stack (reverse simplification order) and
// try to assign each a register not occupied by its already colored
// neighbours. Returns false if any node could not be colored; those nodes
// are queued in mustSpill for the spill pass.
bool
GCRA::selectRegisters()
{
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");

   while (!stack.empty()) {
      RIG_Node *node = &nodes[stack.top()];
      stack.pop();

      regs.reset(node->f);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
               node->getValue()->id, node->colors);

      // block out the registers of all colored neighbours
      for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
         checkInterference(node, ei);
      for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
         checkInterference(node, ei);

      // try preferred registers first (prefRegs — presumably collected to
      // encourage coalescing with related values; verify where it's filled)
      if (!node->prefRegs.empty()) {
         for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
              it != node->prefRegs.end();
              ++it) {
            if ((*it)->reg >= 0 &&
                regs.testOccupy(node->f, (*it)->reg, node->colors)) {
               node->reg = (*it)->reg;
               break;
            }
         }
      }
      if (node->reg >= 0)
         continue;
      LValue *lval = node->getValue();
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         regs.print();
      bool ret = regs.assign(node->reg, node->f, node->colors);
      if (ret) {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
         lval->compMask = node->getCompMask();
      } else {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
                  lval->id, lval->reg.size);
         Symbol *slot = NULL;
         // only GPR values get a memory slot; others are "spilled" without
         // one (slot stays NULL, see SpillCodeInserter::run)
         if (lval->reg.file == FILE_GPR)
            slot = spill.assignSlot(node->livei, lval->reg.size);
         mustSpill.push_back(ValuePair(lval, slot));
      }
   }
   if (!mustSpill.empty())
      return false;
   // success: write the final register ids back to the values
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = nodes[i].getValue();
      if (nodes[i].reg >= 0 && nodes[i].colors > 0)
         lval->reg.data.id =
            regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
   }
   return true;
}
1341
// Top level of the graph-coloring register allocator: create one RIG node
// per value, coalesce, build the interference graph, and run the
// simplify/select phases. Returns false on failure; if coloring failed,
// spill code has been inserted and the caller is expected to retry.
bool
GCRA::allocateRegisters(ArrayList& insns)
{
   bool ret;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "allocateRegisters to %u instructions\n", insns.getSize());

   nodeCount = func->allLValues.getSize();
   nodes = new RIG_Node[nodeCount];
   // NOTE(review): operator new[] throws on failure, so this check is
   // likely dead unless a non-throwing allocator is in play — confirm
   if (!nodes)
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
      if (lval) {
         nodes[i].init(regs, lval);
         RIG.insert(&nodes[i]);
      }
   }

   // coalesce first, we use only 1 RIG node for a group of joined values
   ret = coalesce(insns);
   if (!ret)
      goto out;

   if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      func->printLiveIntervals();

   buildRIG(insns);
   calculateSpillWeights();
   simplify();

   ret = selectRegisters();
   if (!ret) {
      // coloring failed: insert spill code; caller retries allocation
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "selectRegisters failed, inserting spill code ...\n");
      regs.reset(FILE_GPR, true);
      spill.run(mustSpill);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();
   } else {
      prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
   }

out:
   cleanup(ret);
   return ret;
}
1390
// Release per-allocation state. On success, propagate register ids from
// join representatives to their group members and fix up split/merge
// instructions; on failure, undo the coalescing so the next attempt starts
// from a clean slate.
void
GCRA::cleanup(const bool success)
{
   mustSpill.clear();

   for (ArrayList::Iterator it = func->allLValues.iterator();
        !it.end(); it.next()) {
      LValue *lval = reinterpret_cast<LValue *>(it.get());

      lval->livei.clear();

      lval->compound = 0;
      lval->compMask = 0;

      // representatives (join == self) need no fixup
      if (lval->join == lval)
         continue;

      if (success) {
         lval->reg.data.id = lval->join->reg.data.id;
      } else {
         // un-coalesce: detach this value's defs from the representative
         for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
              ++d)
            lval->join->defs.remove(*d);
         lval->join = lval;
      }
   }

   if (success)
      resolveSplitsAndMerges();
   splits.clear(); // avoid duplicate entries on next coalesce pass
   merges.clear();

   delete[] nodes;
   nodes = NULL;
}
1426
1427 Symbol *
1428 SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
1429 {
1430 SpillSlot slot;
1431 int32_t offsetBase = stackSize;
1432 int32_t offset;
1433 std::list<SpillSlot>::iterator pos = slots.end(), it = slots.begin();
1434
1435 if (offsetBase % size)
1436 offsetBase += size - (offsetBase % size);
1437
1438 slot.sym = NULL;
1439
1440 for (offset = offsetBase; offset < stackSize; offset += size) {
1441 const int32_t entryEnd = offset + size;
1442 while (it != slots.end() && it->offset < offset)
1443 ++it;
1444 if (it == slots.end()) // no slots left
1445 break;
1446 std::list<SpillSlot>::iterator bgn = it;
1447
1448 while (it != slots.end() && it->offset < entryEnd) {
1449 it->occup.print();
1450 if (it->occup.overlaps(livei))
1451 break;
1452 ++it;
1453 }
1454 if (it == slots.end() || it->offset >= entryEnd) {
1455 // fits
1456 for (; bgn != slots.end() && bgn->offset < entryEnd; ++bgn) {
1457 bgn->occup.insert(livei);
1458 if (bgn->size() == size)
1459 slot.sym = bgn->sym;
1460 }
1461 break;
1462 }
1463 }
1464 if (!slot.sym) {
1465 stackSize = offset + size;
1466 slot.offset = offset;
1467 slot.sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
1468 if (!func->stackPtr)
1469 offset += func->tlsBase;
1470 slot.sym->setAddress(NULL, offset);
1471 slot.sym->reg.size = size;
1472 slots.insert(pos, slot)->occup.insert(livei);
1473 }
1474 return slot.sym;
1475 }
1476
1477 Value *
1478 SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
1479 {
1480 if (!lval->compound || (lval->compMask & 0x1))
1481 return base;
1482 Value *slot = cloneShallow(func, base);
1483
1484 slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
1485 slot->reg.size = lval->reg.size;
1486
1487 return slot;
1488 }
1489
1490 void
1491 SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
1492 {
1493 const DataType ty = typeOfSize(lval->reg.size);
1494
1495 slot = offsetSlot(slot, lval);
1496
1497 Instruction *st;
1498 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1499 st = new_Instruction(func, OP_STORE, ty);
1500 st->setSrc(0, slot);
1501 st->setSrc(1, lval);
1502 lval->noSpill = 1;
1503 } else {
1504 st = new_Instruction(func, OP_CVT, ty);
1505 st->setDef(0, slot);
1506 st->setSrc(0, lval);
1507 }
1508 defi->bb->insertAfter(defi, st);
1509 }
1510
1511 LValue *
1512 SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
1513 {
1514 const DataType ty = typeOfSize(lval->reg.size);
1515
1516 slot = offsetSlot(slot, lval);
1517 lval = cloneShallow(func, lval);
1518
1519 Instruction *ld;
1520 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1521 lval->noSpill = 1;
1522 ld = new_Instruction(func, OP_LOAD, ty);
1523 } else {
1524 ld = new_Instruction(func, OP_CVT, ty);
1525 }
1526 ld->setDef(0, lval);
1527 ld->setSrc(0, slot);
1528
1529 usei->bb->insertBefore(usei, ld);
1530 return lval;
1531 }
1532
1533
1534 // For each value that is to be spilled, go through all its definitions.
1535 // A value can have multiple definitions if it has been coalesced before.
1536 // For each definition, first go through all its uses and insert an unspill
1537 // instruction before it, then replace the use with the temporary register.
1538 // Unspill can be either a load from memory or simply a move to another
1539 // register file.
1540 // For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
1541 // if we have spilled to a memory location, or simply with the new register.
1542 // No load or conversion instruction should be needed.
bool
SpillCodeInserter::run(const std::list<ValuePair>& lst)
{
   for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
        ++it) {
      LValue *lval = it->first->asLValue();
      // a NULL slot symbol means we spill to a register in another file
      Symbol *mem = it->second ? it->second->asSym() : NULL;

      // Keep track of which instructions to delete later. Deleting them
      // inside the loop is unsafe since a single instruction may have
      // multiple destinations that all need to be spilled (like OP_SPLIT).
      std::tr1::unordered_set<Instruction *> to_del;

      for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
           ++d) {
         Value *slot = mem ?
            static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
         Value *tmp = NULL;
         Instruction *last = NULL;

         LValue *dval = (*d)->get()->asLValue();
         Instruction *defi = (*d)->getInsn();

         // Unspill at each use *before* inserting spill instructions,
         // we don't want to have the spill instructions in the use list here.
         while (!dval->uses.empty()) {
            ValueRef *u = *dval->uses.begin();
            Instruction *usei = u->getInsn();
            assert(usei);
            if (usei->isPseudo()) {
               // pseudo ops take the slot/register directly (tmp may become
               // NULL for memory slots, erasing the use)
               tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
               last = NULL;
            } else
            if (!last || usei != last->next) { // TODO: sort uses
               // reuse the previous reload only for directly adjacent uses
               tmp = unspill(usei, dval, slot);
               last = usei;
            }
            u->set(tmp);
         }

         assert(defi);
         if (defi->isPseudo()) {
            // erase this def; step back so the loop's ++d lands on the next
            d = lval->defs.erase(d);
            --d;
            if (slot->reg.file == FILE_MEMORY_LOCAL)
               to_del.insert(defi);
            else
               defi->setDef(0, slot);
         } else {
            spill(defi, slot, dval);
         }
      }

      // delete pseudo defs collected above, after iteration is done
      for (std::tr1::unordered_set<Instruction *>::const_iterator it = to_del.begin();
           it != to_del.end(); ++it)
         delete_Instruction(func->getProgram(), *it);
   }

   // TODO: We're not trying to reuse old slots in a potential next iteration.
   // We have to update the slots' livei intervals to be able to do that.
   stackBase = stackSize;
   slots.clear();
   return true;
}
1607
1608 bool
1609 RegAlloc::exec()
1610 {
1611 for (IteratorRef it = prog->calls.iteratorDFS(false);
1612 !it->end(); it->next()) {
1613 func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));
1614
1615 func->tlsBase = prog->tlsSize;
1616 if (!execFunc())
1617 return false;
1618 prog->tlsSize += func->tlsSize;
1619 }
1620 return true;
1621 }
1622
// Allocate registers for a single function: insert the required
// constraint/phi/argument moves, then iterate
// build-live-sets -> build-intervals -> allocate
// until allocation succeeds or the retry limit is reached (each failed
// attempt inserts spill code and changes the live ranges).
bool
RegAlloc::execFunc()
{
   InsertConstraintsPass insertConstr;
   PhiMovesPass insertPhiMoves;
   ArgumentMovesPass insertArgMoves;
   BuildIntervalsPass buildIntervals;
   SpillCodeInserter insertSpills(func);

   GCRA gcra(func, insertSpills);

   unsigned int i, retries;
   bool ret;

   if (!func->ins.empty()) {
      // Insert a nop at the entry so inputs only used by the first instruction
      // don't count as having an empty live range.
      Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
      BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
   }

   ret = insertConstr.exec(func);
   if (!ret)
      goto out;

   ret = insertPhiMoves.run(func);
   if (!ret)
      goto out;

   ret = insertArgMoves.run(func);
   if (!ret)
      goto out;

   // TODO: need to fix up spill slot usage ranges to support > 1 retry
   for (retries = 0; retries < 3; ++retries) {
      if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
         INFO("Retry: %i\n", retries);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();

      // spilling to registers may add live ranges, need to rebuild everything
      ret = true;
      // iterate live set construction; the loop nesting depth bounds how
      // many passes are needed for liveness to converge
      for (sequence = func->cfg.nextSequence(), i = 0;
           ret && i <= func->loopNestingBound;
           sequence = func->cfg.nextSequence(), ++i)
         ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
      // reset marker
      for (ArrayList::Iterator bi = func->allBBlocks.iterator();
           !bi.end(); bi.next())
         BasicBlock::get(bi)->liveSet.marker = false;
      if (!ret)
         break;
      func->orderInstructions(this->insns);

      ret = buildIntervals.run(func);
      if (!ret)
         break;
      ret = gcra.allocateRegisters(insns);
      if (ret)
         break; // success
   }
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);

   // report the spill area size so the caller can account for it in TLS
   func->tlsSize = insertSpills.getStackSize();
out:
   return ret;
}
1690
// TODO: check if modifying Instruction::join here breaks anything
// After coloring, assign the component values of recorded split/merge
// instructions consecutive register ids derived from the compound's
// assigned register.
void
GCRA::resolveSplitsAndMerges()
{
   for (std::list<Instruction *>::iterator it = splits.begin();
        it != splits.end();
        ++it) {
      Instruction *split = *it;
      // components get consecutive registers starting at the source's
      unsigned int reg = regs.idToBytes(split->getSrc(0));
      for (int d = 0; split->defExists(d); ++d) {
         Value *v = split->getDef(d);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         reg += v->reg.size;
      }
   }
   splits.clear();

   for (std::list<Instruction *>::iterator it = merges.begin();
        it != merges.end();
        ++it) {
      Instruction *merge = *it;
      // sources get consecutive registers starting at the destination's
      unsigned int reg = regs.idToBytes(merge->getDef(0));
      for (int s = 0; merge->srcExists(s); ++s) {
         Value *v = merge->getSrc(s);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         // If the value is defined by a phi/union node, we also need to
         // perform the same fixup on that node's sources, since after RA
         // their registers should be identical.
         if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
            Instruction *phi = v->getInsn();
            for (int phis = 0; phi->srcExists(phis); ++phis)
               phi->getSrc(phis)->join = v;
         }
         reg += v->reg.size;
      }
   }
   merges.clear();
}
1731
1732 bool Program::registerAllocation()
1733 {
1734 RegAlloc ra(this);
1735 return ra.exec();
1736 }
1737
1738 bool
1739 RegAlloc::InsertConstraintsPass::exec(Function *ir)
1740 {
1741 constrList.clear();
1742
1743 bool ret = run(ir, true, true);
1744 if (ret)
1745 ret = insertConstraintMoves();
1746 return ret;
1747 }
1748
1749 // TODO: make part of texture insn
1750 void
1751 RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
1752 {
1753 Value *def[4];
1754 int c, k, d;
1755 uint8_t mask = 0;
1756
1757 for (d = 0, k = 0, c = 0; c < 4; ++c) {
1758 if (!(tex->tex.mask & (1 << c)))
1759 continue;
1760 if (tex->getDef(k)->refCount()) {
1761 mask |= 1 << c;
1762 def[d++] = tex->getDef(k);
1763 }
1764 ++k;
1765 }
1766 tex->tex.mask = mask;
1767
1768 for (c = 0; c < d; ++c)
1769 tex->setDef(c, def[c]);
1770 for (; c < 4; ++c)
1771 tex->setDef(c, NULL);
1772 }
1773
1774 bool
1775 RegAlloc::InsertConstraintsPass::detectConflict(Instruction *cst, int s)
1776 {
1777 Value *v = cst->getSrc(s);
1778
1779 // current register allocation can't handle it if a value participates in
1780 // multiple constraints
1781 for (Value::UseIterator it = v->uses.begin(); it != v->uses.end(); ++it) {
1782 if (cst != (*it)->getInsn())
1783 return true;
1784 }
1785
1786 // can start at s + 1 because detectConflict is called on all sources
1787 for (int c = s + 1; cst->srcExists(c); ++c)
1788 if (v == cst->getSrc(c))
1789 return true;
1790
1791 Instruction *defi = v->getInsn();
1792
1793 return (!defi || defi->constrainedDefs());
1794 }
1795
// Tie sources [s, s + n) of instruction i to consecutive registers via an
// OP_CONSTRAINT instruction, reusing an existing dominating constraint on
// the same values when possible.
void
RegAlloc::InsertConstraintsPass::addConstraint(Instruction *i, int s, int n)
{
   Instruction *cst;
   int d;

   // first, look for an existing identical constraint op
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      cst = (*it);
      // only constraints in dominating blocks are safe to reuse
      if (!i->bb->dominatedBy(cst->bb))
         break;
      for (d = 0; d < n; ++d)
         if (cst->getSrc(d) != i->getSrc(d + s))
            break;
      if (d >= n) {
         // full match: just reference the existing constraint's defs
         for (d = 0; d < n; ++d, ++s)
            i->setSrc(s, cst->getDef(d));
         return;
      }
   }
   cst = new_Instruction(func, OP_CONSTRAINT, i->dType);

   // route each source through a fresh def of the constraint instruction
   for (d = 0; d < n; ++s, ++d) {
      cst->setDef(d, new_LValue(func, FILE_GPR));
      cst->setSrc(d, i->getSrc(s));
      i->setSrc(s, cst->getDef(d));
   }
   i->bb->insertBefore(i, cst);

   constrList.push_back(cst);
}
1829
1830 // Add a dummy use of the pointer source of >= 8 byte loads after the load
1831 // to prevent it from being assigned a register which overlapping the load's
1832 // destination, which would produce random corruptions.
1833 void
1834 RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
1835 {
1836 Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
1837 hzd->setSrc(0, src->get());
1838 i->bb->insertAfter(i, hzd);
1839
1840 }
1841
// b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
// Replace the leading run of GPR defs of insn with a single wide value and
// emit an OP_SPLIT after insn that breaks it back into the original defs.
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
{
   uint8_t size = 0;
   int n;
   // n = number of leading GPR defs, size = their combined byte size
   for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n)
      size += insn->getDef(n)->reg.size;
   if (n < 2)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   // the split hands the original defs their values back
   Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
   split->setSrc(0, lval);
   for (int d = 0; d < n; ++d) {
      split->setDef(d, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   insn->setDef(0, lval);

   // compact any remaining (non-GPR) defs behind the new wide def
   for (int k = 1, d = n; insn->defExists(d); ++d, ++k) {
      insn->setDef(k, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   // carry over predicate if any (mainly for OP_UNION uses)
   split->setPredicate(insn->cc, insn->getPredicate());

   insn->bb->insertAfter(insn, split);
   constrList.push_back(split);
}
// Merge sources [a, b] of insn into a single wide value fed by an OP_MERGE
// inserted before insn, so those sources end up in consecutive registers.
void
RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getSrc(s)->reg.size;
   if (!size)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   // temporarily detach the instruction's extra sources so they don't get
   // shifted by the source renumbering below
   Value *save[3];
   insn->takeExtraSources(0, save);

   Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
   merge->setDef(0, lval);
   for (int s = a, i = 0; s <= b; ++s, ++i) {
      merge->setSrc(i, insn->getSrc(s));
      insn->setSrc(s, NULL);
   }
   insn->setSrc(a, lval);

   // close the gap left by the condensed sources
   for (int k = a + 1, s = b + 1; insn->srcExists(s); ++s, ++k) {
      insn->setSrc(k, insn->getSrc(s));
      insn->setSrc(s, NULL);
   }
   insn->bb->insertBefore(insn, merge);

   insn->putExtraSources(0, save);

   constrList.push_back(merge);
}
1908
// Insert source/destination grouping constraints for texture and surface
// instructions on GM107 (chipset 0x110) class chips.
void
RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
      // surface stores: group the data operand, one 32-bit source per
      // 4 bytes of dType, starting at src 3
      condenseSrcs(tex, 3, (3 + typeSizeof(tex->dType) / 4) - 1);
   } else
   if (isTextureOp(tex->op)) {
      if (tex->op != OP_TXQ) {
         // s = sources in the first tuple, n = remaining sources
         s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
         if (tex->op == OP_TXD) {
            // Indirect handle belongs in the first arg
            if (tex->tex.rIndirectSrc >= 0)
               s++;
            if (!tex->tex.target.isArray() && tex->tex.useOffsets)
               s++;
         }
         n = tex->srcCount(0xff) - s;
      } else {
         s = tex->srcCount(0xff);
         n = 0;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1) // NOTE: first call modified positions already
         condenseSrcs(tex, 1, n);
   }
}
1943
1944 void
1945 RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
1946 {
1947 if (isTextureOp(tex->op))
1948 textureMask(tex);
1949 condenseDefs(tex);
1950
1951 if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
1952 condenseSrcs(tex, 3, (3 + typeSizeof(tex->dType) / 4) - 1);
1953 } else
1954 if (isTextureOp(tex->op)) {
1955 int n = tex->srcCount(0xff, true);
1956 if (n > 4) {
1957 condenseSrcs(tex, 0, 3);
1958 if (n > 5) // NOTE: first call modified positions already
1959 condenseSrcs(tex, 4 - (4 - 1), n - 1 - (4 - 1));
1960 } else
1961 if (n > 1) {
1962 condenseSrcs(tex, 0, n - 1);
1963 }
1964 }
1965 }
1966
// Insert source/destination grouping constraints for texture instructions
// on NVC0 (Fermi) class chips.
void
RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
{
   int n, s;

   textureMask(tex);

   if (tex->op == OP_TXQ) {
      // query: all sources form one tuple
      s = tex->srcCount(0xff);
      n = 0;
   } else {
      // s = sources in the first (coordinate) tuple,
      // n = remaining sources for the second tuple
      s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
      if (!tex->tex.target.isArray() &&
          (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
         ++s;
      if (tex->op == OP_TXD && tex->tex.useOffsets)
         ++s;
      n = tex->srcCount(0xff) - s;
      assert(n <= 4);
   }

   if (s > 1)
      condenseSrcs(tex, 0, s - 1);
   if (n > 1) // NOTE: first call modified positions already
      condenseSrcs(tex, 1, n);

   condenseDefs(tex);
}
1995
// Insert texture constraints for NV50 class chips: pad the source and
// destination vectors to the same component count, then condense both
// into consecutive register tuples.
void
RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
{
   // detach the predicate so it doesn't get mixed into the padding below
   Value *pred = tex->getPredicate();
   if (pred)
      tex->setPredicate(tex->cc, NULL);

   textureMask(tex);

   assert(tex->defExists(0) && tex->srcExists(0));
   // make src and def count match
   int c;
   for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
      if (!tex->srcExists(c))
         tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
      if (!tex->defExists(c))
         tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
   }
   if (pred)
      tex->setPredicate(tex->cc, pred);
   condenseDefs(tex);
   condenseSrcs(tex, 0, c - 1);
}
2019
// Insert constraint markers for instructions whose multiple sources must be
// located in consecutive registers.
bool
RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
{
   TexInstruction *tex;
   Instruction *next;
   int s, size;

   targ = bb->getProgram()->getTarget();

   // iterate via "next" because the constraint helpers insert instructions
   // around the current one
   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if ((tex = i->asTex())) {
         // pick the constraint scheme for the chip generation
         switch (targ->getChipset() & ~0xf) {
         case 0x50:
         case 0x80:
         case 0x90:
         case 0xa0:
            texConstraintNV50(tex);
            break;
         case 0xc0:
         case 0xd0:
            texConstraintNVC0(tex);
            break;
         case 0xe0:
         case 0xf0:
         case 0x100:
            texConstraintNVE0(tex);
            break;
         case 0x110:
            texConstraintGM107(tex);
            break;
         default:
            break;
         }
      } else
      if (i->op == OP_EXPORT || i->op == OP_STORE) {
         // the stored data (sources from index 1 on) must be contiguous;
         // count sources until they cover typeSizeof(dType) bytes
         for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
            assert(i->srcExists(s));
            size -= i->getSrc(s)->reg.size;
         }
         condenseSrcs(i, 1, s - 1);
      } else
      if (i->op == OP_LOAD || i->op == OP_VFETCH) {
         condenseDefs(i);
         // wide loads: keep the address register(s) from overlapping the
         // destination (see addHazard)
         if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(0));
         if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(1));
      } else
      if (i->op == OP_UNION ||
          i->op == OP_MERGE ||
          i->op == OP_SPLIT) {
         constrList.push_back(i);
      }
   }
   return true;
}
2080
// Insert extra moves so that, if multiple register constraints on a value are
// in conflict, these conflicts can be resolved.
bool
RegAlloc::InsertConstraintsPass::insertConstraintMoves()
{
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      Instruction *cst = *it;
      Instruction *mov;

      // NOTE(review): the "&& 0" deliberately disables this branch;
      // the code is kept for reference
      if (cst->op == OP_SPLIT && 0) {
         // spilling splits is annoying, just make sure they're separate
         for (int d = 0; cst->defExists(d); ++d) {
            if (!cst->getDef(d)->refCount())
               continue;
            LValue *lval = new_LValue(func, cst->def(d).getFile());
            const uint8_t size = cst->def(d).getSize();
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setSrc(0, lval);
            mov->setDef(0, cst->getDef(d));
            cst->setDef(d, mov->getSrc(0));
            cst->bb->insertAfter(cst, mov);

            cst->getSrc(0)->asLValue()->noSpill = 1;
            mov->getSrc(0)->asLValue()->noSpill = 1;
         }
      } else
      if (cst->op == OP_MERGE || cst->op == OP_UNION) {
         for (int s = 0; cst->srcExists(s); ++s) {
            const uint8_t size = cst->src(s).getSize();

            if (!cst->getSrc(s)->defs.size()) {
               // source has no def: give it a dummy one (OP_NOP) so RA has
               // something to assign a register to
               mov = new_Instruction(func, OP_NOP, typeOfSize(size));
               mov->setDef(0, cst->getSrc(s));
               cst->bb->insertBefore(cst, mov);
               continue;
            }
            assert(cst->getSrc(s)->defs.size() == 1); // still SSA

            Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();
            // catch some cases where don't really need MOVs
            if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs())
               continue;

            // route the source through a fresh value via a MOV so this
            // constraint doesn't conflict with others on the same value
            LValue *lval = new_LValue(func, cst->src(s).getFile());
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setDef(0, lval);
            mov->setSrc(0, cst->getSrc(s));
            cst->setSrc(s, mov->getDef(0));
            cst->bb->insertBefore(cst, mov);

            cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help

            // unions are predicated selects: the mov must inherit the
            // defining instruction's predicate
            if (cst->op == OP_UNION)
               mov->setPredicate(defi->cc, defi->getPredicate());
         }
      }
   }

   return true;
}
2147
2148 } // namespace nv50_ir