// src/gallium/drivers/nouveau/codegen/nv50_ir_ra.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"

#include <stack>
#include <limits>

namespace nv50_ir {

#define MAX_REGISTER_FILE_SIZE 256

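// Tracks which allocation units of each register file are occupied. Sizes
// are expressed in the file's allocation units (1 << unit[f] bytes each).
// A minimal usage sketch (illustrative only, not taken from this file):
//
//    RegisterSet set(targ);
//    int32_t reg;
//    if (set.assign(reg, FILE_GPR, set.units(FILE_GPR, 4)))   // 4-byte GPR
//       set.release(FILE_GPR, reg, set.units(FILE_GPR, 4));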
class RegisterSet
{
public:
   RegisterSet(const Target *);

   void init(const Target *);
   void reset(DataFile, bool resetMax = false);

   void periodicMask(DataFile f, uint32_t lock, uint32_t unlock);
   void intersect(DataFile f, const RegisterSet *);

   bool assign(int32_t& reg, DataFile f, unsigned int size);
   void release(DataFile f, int32_t reg, unsigned int size);
   void occupy(DataFile f, int32_t reg, unsigned int size);
   void occupy(const Value *);
   void occupyMask(DataFile f, int32_t reg, uint8_t mask);
   bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
   bool testOccupy(const Value *);
   bool testOccupy(DataFile f, int32_t reg, unsigned int size);

   inline int getMaxAssigned(DataFile f) const { return fill[f]; }

   inline unsigned int getFileSize(DataFile f, uint8_t regSize) const
   {
      if (restrictedGPR16Range && f == FILE_GPR && regSize == 2)
         return (last[f] + 1) / 2;
      return last[f] + 1;
   }

   inline unsigned int units(DataFile f, unsigned int size) const
   {
      return size >> unit[f];
   }
   // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
   inline unsigned int idToBytes(const Value *v) const
   {
      return v->reg.data.id * MIN2(v->reg.size, 4);
   }
   inline unsigned int idToUnits(const Value *v) const
   {
      return units(v->reg.file, idToBytes(v));
   }
   inline int bytesToId(Value *v, unsigned int bytes) const
   {
      if (v->reg.size < 4)
         return units(v->reg.file, bytes);
      return bytes / 4;
   }
   inline int unitsToId(DataFile f, int u, uint8_t size) const
   {
      if (u < 0)
         return -1;
      return (size < 4) ? u : ((u << unit[f]) / 4);
   }

   void print() const;

private:
   BitSet bits[LAST_REGISTER_FILE + 1];

   int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity

   int last[LAST_REGISTER_FILE + 1];
   int fill[LAST_REGISTER_FILE + 1];

   const bool restrictedGPR16Range;
};

void
RegisterSet::reset(DataFile f, bool resetMax)
{
   bits[f].fill(0);
   if (resetMax)
      fill[f] = -1;
}

void
RegisterSet::init(const Target *targ)
{
   for (unsigned int rf = 0; rf <= FILE_ADDRESS; ++rf) {
      DataFile f = static_cast<DataFile>(rf);
      last[rf] = targ->getFileSize(f) - 1;
      unit[rf] = targ->getFileUnit(f);
      fill[rf] = -1;
      assert(last[rf] < MAX_REGISTER_FILE_SIZE);
      bits[rf].allocate(last[rf] + 1, true);
   }
}

RegisterSet::RegisterSet(const Target *targ)
   : restrictedGPR16Range(targ->getChipset() < 0xc0)
{
   init(targ);
   for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
      reset(static_cast<DataFile>(i));
}

void
RegisterSet::periodicMask(DataFile f, uint32_t lock, uint32_t unlock)
{
   bits[f].periodicMask32(lock, unlock);
}

void
RegisterSet::intersect(DataFile f, const RegisterSet *set)
{
   bits[f] |= set->bits[f];
}

void
RegisterSet::print() const
{
   INFO("GPR:");
   bits[FILE_GPR].print();
   INFO("\n");
}

bool
RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size)
{
   reg = bits[f].findFreeRange(size);
   if (reg < 0)
      return false;
   fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
   return true;
}

bool
RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
{
   return bits[f].testRange(reg, size);
}

void
RegisterSet::occupy(const Value *v)
{
   occupy(v->reg.file, idToUnits(v), v->reg.size >> unit[v->reg.file]);
}

void
RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
{
   bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
}

void
RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].setRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);

   fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
}

bool
RegisterSet::testOccupy(const Value *v)
{
   return testOccupy(v->reg.file,
                     idToUnits(v), v->reg.size >> unit[v->reg.file]);
}

bool
RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
{
   if (isOccupied(f, reg, size))
      return false;
   occupy(f, reg, size);
   return true;
}

void
RegisterSet::release(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].clrRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg release: %u[%i] %u\n", f, reg, size);
}

class RegAlloc
{
public:
   RegAlloc(Program *program) : prog(program), sequence(0) { }

   bool exec();
   bool execFunc();

private:
   class PhiMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
   };

   class ArgumentMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
   };

   class BuildIntervalsPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      void collectLiveValues(BasicBlock *);
      void addLiveRange(Value *, const BasicBlock *, int end);
   };

   class InsertConstraintsPass : public Pass {
   public:
      bool exec(Function *func);
   private:
      virtual bool visit(BasicBlock *);

      bool insertConstraintMoves();

      void condenseDefs(Instruction *);
      void condenseSrcs(Instruction *, const int first, const int last);

      void addHazard(Instruction *i, const ValueRef *src);
      void textureMask(TexInstruction *);
      void addConstraint(Instruction *, int s, int n);
      bool detectConflict(Instruction *, int s);

      // target specific functions, TODO: put in subclass or Target
      void texConstraintNV50(TexInstruction *);
      void texConstraintNVC0(TexInstruction *);
      void texConstraintNVE0(TexInstruction *);
      void texConstraintGM107(TexInstruction *);

      std::list<Instruction *> constrList;

      const Target *targ;
   };

   bool buildLiveSets(BasicBlock *);

private:
   Program *prog;
   Function *func;

   // instructions in control flow / chronological order
   ArrayList insns;

   int sequence; // for manual passes through CFG
};

typedef std::pair<Value *, Value *> ValuePair;

class SpillCodeInserter
{
public:
   SpillCodeInserter(Function *fn) : func(fn), stackSize(0), stackBase(0) { }

   bool run(const std::list<ValuePair>&);

   Symbol *assignSlot(const Interval&, const unsigned int size);
   Value *offsetSlot(Value *, const LValue *);
   inline int32_t getStackSize() const { return stackSize; }

private:
   Function *func;

   struct SpillSlot
   {
      Interval occup;
      std::list<Value *> residents; // needed to recalculate occup
      Symbol *sym;
      int32_t offset;
      inline uint8_t size() const { return sym->reg.size; }
   };
   std::list<SpillSlot> slots;
   int32_t stackSize;
   int32_t stackBase;

   LValue *unspill(Instruction *usei, LValue *, Value *slot);
   void spill(Instruction *defi, Value *slot, LValue *);
};

void
RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
                                           const BasicBlock *bb,
                                           int end)
{
   Instruction *insn = val->getUniqueInsn();

   if (!insn)
      insn = bb->getFirst();

   assert(bb->getFirst()->serial <= bb->getExit()->serial);
   assert(bb->getExit()->serial + 1 >= end);

   int begin = insn->serial;
   if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
      begin = bb->getEntry()->serial;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
            val->id, begin, insn->serial, end);

   if (begin != end) // empty ranges are only added as hazards for fixed regs
      val->livei.extend(begin, end);
}

bool
RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
{
   if (b->cfg.incidentCount() <= 1)
      return false;

   int n = 0;
   for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
      if (ei.getType() == Graph::Edge::TREE ||
          ei.getType() == Graph::Edge::FORWARD)
         ++n;
   return (n == 2);
}

// For each operand of each PHI in b, generate a new value by inserting a MOV
// at the end of the block it is coming from and replace the operand with its
// result. This eliminates liveness conflicts and enables us to let values be
// copied to the right register if such a conflict exists nonetheless.
//
// These MOVs are also crucial in making sure the live intervals of phi sources
// are extended until the end of the loop, since they are not included in the
// live-in sets.
bool
RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
{
   Instruction *phi, *mov;
   BasicBlock *pb, *pn;

   std::stack<BasicBlock *> stack;

   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      pb = BasicBlock::get(ei.getNode());
      assert(pb);
      if (needNewElseBlock(bb, pb))
         stack.push(pb);
   }
   while (!stack.empty()) {
      pb = stack.top();
      pn = new BasicBlock(func);
      stack.pop();

      pb->cfg.detach(&bb->cfg);
      pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
      pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);

      assert(pb->getExit()->op != OP_CALL);
      if (pb->getExit()->asFlow()->target.bb == bb)
         pb->getExit()->asFlow()->target.bb = pn;
   }

   // insert MOVs (phi->src(j) should stem from j-th in-BB)
   int j = 0;
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      pb = BasicBlock::get(ei.getNode());
      if (!pb->isTerminated())
         pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));

      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
         mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));

         mov->setSrc(0, phi->getSrc(j));
         mov->setDef(0, tmp);
         phi->setSrc(j, tmp);

         pb->insertBefore(pb->getExit(), mov);
      }
      ++j;
   }

   return true;
}

bool
RegAlloc::ArgumentMovesPass::visit(BasicBlock *bb)
{
   // Bind function call inputs/outputs to the same physical register the
   // callee uses, inserting moves as appropriate in case a conflict arises.
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      FlowInstruction *cal = i->asFlow();
      // TODO: Handle indirect calls.
      // Right now they should only be generated for builtins.
      if (!cal || cal->op != OP_CALL || cal->builtin || cal->indirect)
         continue;
      RegisterSet clobberSet(prog->getTarget());

      // Bind input values.
      for (int s = cal->indirect ? 1 : 0; cal->srcExists(s); ++s) {
         const int t = cal->indirect ? (s - 1) : s;
         LValue *tmp = new_LValue(func, cal->getSrc(s)->asLValue());
         tmp->reg.data.id = cal->target.fn->ins[t].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setDef(0, tmp);
         mov->setSrc(0, cal->getSrc(s));
         cal->setSrc(s, tmp);

         bb->insertBefore(cal, mov);
      }

      // Bind output values.
      for (int d = 0; cal->defExists(d); ++d) {
         LValue *tmp = new_LValue(func, cal->getDef(d)->asLValue());
         tmp->reg.data.id = cal->target.fn->outs[d].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setSrc(0, tmp);
         mov->setDef(0, cal->getDef(d));
         cal->setDef(d, tmp);

         bb->insertAfter(cal, mov);
         clobberSet.occupy(tmp);
      }

      // Bind clobbered values.
      for (std::deque<Value *>::iterator it = cal->target.fn->clobbers.begin();
           it != cal->target.fn->clobbers.end();
           ++it) {
         if (clobberSet.testOccupy(*it)) {
            Value *tmp = new_LValue(func, (*it)->asLValue());
            tmp->reg.data.id = (*it)->reg.data.id;
            cal->setDef(cal->defCount(), tmp);
         }
      }
   }

   // Update the clobber set of the function.
   if (BasicBlock::get(func->cfgExit) == bb) {
      func->buildDefSets();
      for (unsigned int i = 0; i < bb->defSet.getSize(); ++i)
         if (bb->defSet.test(i))
            func->clobbers.push_back(func->getLValue(i));
   }

   return true;
}

// Build the set of live-in variables of bb.
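// This is the usual backward dataflow computation, evaluated recursively
// over the successors: liveIn(bb) = (union of the successors' live-in sets
// minus values defined in bb) plus values used in bb; phi defs are removed
// and function outputs are added at the exit block.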
bool
RegAlloc::buildLiveSets(BasicBlock *bb)
{
   Function *f = bb->getFunction();
   BasicBlock *bn;
   Instruction *i;
   unsigned int s, d;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());

   bb->liveSet.allocate(func->allLValues.getSize(), false);

   int n = 0;
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      bn = BasicBlock::get(ei.getNode());
      if (bn == bb)
         continue;
      if (bn->cfg.visit(sequence))
         if (!buildLiveSets(bn))
            return false;
      if (n++ || bb->liveSet.marker)
         bb->liveSet |= bn->liveSet;
      else
         bb->liveSet = bn->liveSet;
   }
   if (!n && !bb->liveSet.marker)
      bb->liveSet.fill(0);
   bb->liveSet.marker = true;

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set of out blocks:\n", bb->getId());
      bb->liveSet.print();
   }

   // if (!bb->getEntry())
   //   return true;

   if (bb == BasicBlock::get(f->cfgExit)) {
      for (std::deque<ValueRef>::iterator it = f->outs.begin();
           it != f->outs.end(); ++it) {
         assert(it->get()->asLValue());
         bb->liveSet.set(it->get()->id);
      }
   }

   for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
      for (d = 0; i->defExists(d); ++d)
         bb->liveSet.clr(i->getDef(d)->id);
      for (s = 0; i->srcExists(s); ++s)
         if (i->getSrc(s)->asLValue())
            bb->liveSet.set(i->getSrc(s)->id);
   }
   for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
      bb->liveSet.clr(i->getDef(0)->id);

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set after propagation:\n", bb->getId());
      bb->liveSet.print();
   }

   return true;
}

void
RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
{
   BasicBlock *bbA = NULL, *bbB = NULL;

   if (bb->cfg.outgoingCount()) {
      // trickery to save a loop of OR'ing liveSets
      // aliasing works fine with BitSet::setOr
      for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
         if (ei.getType() == Graph::Edge::DUMMY)
            continue;
         if (bbA) {
            bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
            bbA = bb;
         } else {
            bbA = bbB;
         }
         bbB = BasicBlock::get(ei.getNode());
      }
      bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
   } else
   if (bb->cfg.incidentCount()) {
      bb->liveSet.fill(0);
   }
}

bool
RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
{
   collectLiveValues(bb);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());

   // go through out blocks and delete phi sources that do not originate from
   // the current block from the live set
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      BasicBlock *out = BasicBlock::get(ei.getNode());

      for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
         bb->liveSet.clr(i->getDef(0)->id);

         for (int s = 0; i->srcExists(s); ++s) {
            assert(i->src(s).getInsn());
            if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
               bb->liveSet.set(i->getSrc(s)->id);
            else
               bb->liveSet.clr(i->getSrc(s)->id);
         }
      }
   }

   // remaining live-outs are live until end
   if (bb->getExit()) {
      for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
         if (bb->liveSet.test(j))
            addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
   }

   for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
      for (int d = 0; i->defExists(d); ++d) {
         bb->liveSet.clr(i->getDef(d)->id);
         if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
            i->getDef(d)->livei.extend(i->serial, i->serial);
      }

      for (int s = 0; i->srcExists(s); ++s) {
         if (!i->getSrc(s)->asLValue())
            continue;
         if (!bb->liveSet.test(i->getSrc(s)->id)) {
            bb->liveSet.set(i->getSrc(s)->id);
            addLiveRange(i->getSrc(s), bb, i->serial);
         }
      }
   }

   if (bb == BasicBlock::get(func->cfg.getRoot())) {
      for (std::deque<ValueDef>::iterator it = func->ins.begin();
           it != func->ins.end(); ++it) {
         if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
            it->get()->livei.extend(0, 1);
      }
   }

   return true;
}


#define JOIN_MASK_PHI    (1 << 0)
#define JOIN_MASK_UNION  (1 << 1)
#define JOIN_MASK_MOV    (1 << 2)
#define JOIN_MASK_TEX    (1 << 3)

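// Graph-colouring register allocator (GCRA). allocateRegisters() follows
// the usual Chaitin/Briggs pipeline: coalesce copies, build the register
// interference graph (RIG), compute spill weights, simplify (peel nodes
// off onto a stack), then select (pop nodes and assign registers). When
// selection fails, spill code is inserted and the caller retries with
// rebuilt live intervals.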
class GCRA
{
public:
   GCRA(Function *, SpillCodeInserter&);
   ~GCRA();

   bool allocateRegisters(ArrayList& insns);

   void printNodeInfo() const;

private:
   class RIG_Node : public Graph::Node
   {
   public:
      RIG_Node();

      void init(const RegisterSet&, LValue *);

      void addInterference(RIG_Node *);
      void addRegPreference(RIG_Node *);

      inline LValue *getValue() const
      {
         return reinterpret_cast<LValue *>(data);
      }
      inline void setValue(LValue *lval) { data = lval; }

      inline uint8_t getCompMask() const
      {
         return ((1 << colors) - 1) << (reg & 7);
      }

      static inline RIG_Node *get(const Graph::EdgeIterator& ei)
      {
         return static_cast<RIG_Node *>(ei.getNode());
      }

   public:
      uint32_t degree;
      uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
      uint16_t colors;

      DataFile f;
      int32_t reg;

      float weight;

      // list pointers for simplify() phase
      RIG_Node *next;
      RIG_Node *prev;

      // union of the live intervals of all coalesced values (we want to retain
      // the separate intervals for testing interference of compound values)
      Interval livei;

      std::list<RIG_Node *> prefRegs;
   };

private:
   inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }

   void buildRIG(ArrayList&);
   bool coalesce(ArrayList&);
   bool doCoalesce(ArrayList&, unsigned int mask);
   void calculateSpillWeights();
   void simplify();
   bool selectRegisters();
   void cleanup(const bool success);

   void simplifyEdge(RIG_Node *, RIG_Node *);
   void simplifyNode(RIG_Node *);

   bool coalesceValues(Value *, Value *, bool force);
   void resolveSplitsAndMerges();
   void makeCompound(Instruction *, bool isSplit);

   inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);

   inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
   void checkList(std::list<RIG_Node *>&);

private:
   std::stack<uint32_t> stack;

   // list headers for simplify() phase
   RIG_Node lo[2];
   RIG_Node hi;

   Graph RIG;
   RIG_Node *nodes;
   unsigned int nodeCount;

   Function *func;
   Program *prog;

   static uint8_t relDegree[17][17];

   RegisterSet regs;

   // need to fixup register id for participants of OP_MERGE/SPLIT
   std::list<Instruction *> merges;
   std::list<Instruction *> splits;

   SpillCodeInserter& spill;
   std::list<ValuePair> mustSpill;
};

uint8_t GCRA::relDegree[17][17];

GCRA::RIG_Node::RIG_Node() : Node(NULL), next(this), prev(this)
{
   colors = 0;
}

void
GCRA::printNodeInfo() const
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      if (!nodes[i].colors)
         continue;
      INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
           i,
           nodes[i].f, nodes[i].reg, nodes[i].colors,
           nodes[i].weight,
           nodes[i].degree, nodes[i].degreeLimit);

      for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      INFO("\n");
   }
}

void
GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
{
   setValue(lval);
   if (lval->reg.data.id >= 0)
      lval->noSpill = lval->fixedReg = 1;

   colors = regs.units(lval->reg.file, lval->reg.size);
   f = lval->reg.file;
   reg = -1;
   if (lval->reg.data.id >= 0)
      reg = regs.idToUnits(lval);

   weight = std::numeric_limits<float>::infinity();
   degree = 0;
   degreeLimit = regs.getFileSize(f, lval->reg.size);
   degreeLimit -= relDegree[1][colors] - 1;

   livei.insert(lval->livei);
}

bool
GCRA::coalesceValues(Value *dst, Value *src, bool force)
{
   LValue *rep = dst->join->asLValue();
   LValue *val = src->join->asLValue();

   if (!force && val->reg.data.id >= 0) {
      rep = src->join->asLValue();
      val = dst->join->asLValue();
   }
   RIG_Node *nRep = &nodes[rep->id];
   RIG_Node *nVal = &nodes[val->id];

   if (src->reg.file != dst->reg.file) {
      if (!force)
         return false;
      WARN("forced coalescing of values in different files !\n");
   }
   if (!force && dst->reg.size != src->reg.size)
      return false;

   if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
      if (force) {
         if (val->reg.data.id >= 0)
            WARN("forced coalescing of values in different fixed regs !\n");
      } else {
         if (val->reg.data.id >= 0)
            return false;
         // make sure that there is no overlap with the fixed register of rep
         for (ArrayList::Iterator it = func->allLValues.iterator();
              !it.end(); it.next()) {
            Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
            assert(reg);
            if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
               return false;
         }
      }
   }

   if (!force && nRep->livei.overlaps(nVal->livei))
      return false;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
            rep->id, rep->reg.data.id, val->id);

   // set join pointer of all values joined with val
   for (Value::DefIterator def = val->defs.begin(); def != val->defs.end();
        ++def)
      (*def)->get()->join = rep;
   assert(rep->join == rep && val->join == rep);

   // add val's definitions to rep and extend the live interval of its RIG node
   rep->defs.insert(rep->defs.end(), val->defs.begin(), val->defs.end());
   nRep->livei.unify(nVal->livei);
   return true;
}

bool
GCRA::coalesce(ArrayList& insns)
{
   bool ret = doCoalesce(insns, JOIN_MASK_PHI);
   if (!ret)
      return false;
   switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
   case 0x50:
   case 0x80:
   case 0x90:
   case 0xa0:
      ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
      break;
   case 0xc0:
   case 0xd0:
   case 0xe0:
   case 0xf0:
   case 0x100:
   case 0x110:
      ret = doCoalesce(insns, JOIN_MASK_UNION);
      break;
   default:
      break;
   }
   if (!ret)
      return false;
   return doCoalesce(insns, JOIN_MASK_MOV);
}

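// Working through the arithmetic below for a value made of two 32-bit
// halves (compSize = 2): the first half (base 0, size 1) starts from
// m = 0x01 and widens to 0x55, the second (base 1, size 1) from m = 0x02
// to 0xaa; together they cover all 8 bits of the compound mask.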
static inline uint8_t makeCompMask(int compSize, int base, int size)
{
   uint8_t m = ((1 << size) - 1) << base;

   switch (compSize) {
   case 1:
      return 0xff;
   case 2:
      m |= (m << 2);
      return (m << 4) | m;
   case 3:
   case 4:
      return (m << 4) | m;
   default:
      assert(compSize <= 8);
      return m;
   }
}

// Used when coalescing moves. The non-compound value will become one, e.g.:
// mov b32 $r0 $r2 / merge b64 $r0d { $r0 $r1 }
// split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
static inline void copyCompound(Value *dst, Value *src)
{
   LValue *ldst = dst->asLValue();
   LValue *lsrc = src->asLValue();

   if (ldst->compound && !lsrc->compound) {
      LValue *swap = lsrc;
      lsrc = ldst;
      ldst = swap;
   }

   ldst->compound = lsrc->compound;
   ldst->compMask = lsrc->compMask;
}

void
GCRA::makeCompound(Instruction *insn, bool split)
{
   LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("makeCompound(split = %i): ", split);
      insn->print();
   }

   const unsigned int size = getNode(rep)->colors;
   unsigned int base = 0;

   if (!rep->compound)
      rep->compMask = 0xff;
   rep->compound = 1;

   for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
      LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();

      val->compound = 1;
      if (!val->compMask)
         val->compMask = 0xff;
      val->compMask &= makeCompMask(size, base, getNode(val)->colors);
      assert(val->compMask);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
               rep->id, rep->compMask, val->id, val->compMask);

      base += getNode(val)->colors;
   }
   assert(base == size);
}

bool
GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
{
   int c, n;

   for (n = 0; n < insns.getSize(); ++n) {
      Instruction *i;
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));

      switch (insn->op) {
      case OP_PHI:
         if (!(mask & JOIN_MASK_PHI))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
               // this is bad
               ERROR("failed to coalesce phi operands\n");
               return false;
            }
         break;
      case OP_UNION:
      case OP_MERGE:
         if (!(mask & JOIN_MASK_UNION))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            coalesceValues(insn->getDef(0), insn->getSrc(c), true);
         if (insn->op == OP_MERGE) {
            merges.push_back(insn);
            if (insn->srcExists(1))
               makeCompound(insn, false);
         }
         break;
      case OP_SPLIT:
         if (!(mask & JOIN_MASK_UNION))
            break;
         splits.push_back(insn);
         for (c = 0; insn->defExists(c); ++c)
            coalesceValues(insn->getSrc(0), insn->getDef(c), true);
         makeCompound(insn, true);
         break;
      case OP_MOV:
         if (!(mask & JOIN_MASK_MOV))
            break;
         i = NULL;
         if (!insn->getDef(0)->uses.empty())
            i = (*insn->getDef(0)->uses.begin())->getInsn();
         // if this is a constraint-move there will only be a single use
         if (i && i->op == OP_MERGE) // do we really still need this ?
            break;
         i = insn->getSrc(0)->getUniqueInsn();
         if (i && !i->constrainedDefs()) {
            if (coalesceValues(insn->getDef(0), insn->getSrc(0), false))
               copyCompound(insn->getSrc(0), insn->getDef(0));
         }
         break;
      case OP_TEX:
      case OP_TXB:
      case OP_TXL:
      case OP_TXF:
      case OP_TXQ:
      case OP_TXD:
      case OP_TXG:
      case OP_TXLQ:
      case OP_TEXCSAA:
      case OP_TEXPREP:
         if (!(mask & JOIN_MASK_TEX))
            break;
         for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
            coalesceValues(insn->getDef(c), insn->getSrc(c), true);
         break;
      default:
         break;
      }
   }
   return true;
}

void
GCRA::RIG_Node::addInterference(RIG_Node *node)
{
   this->degree += relDegree[node->colors][colors];
   node->degree += relDegree[colors][node->colors];

   this->attach(node, Graph::Edge::CROSS);
}

void
GCRA::RIG_Node::addRegPreference(RIG_Node *node)
{
   prefRegs.push_back(node);
}

GCRA::GCRA(Function *fn, SpillCodeInserter& spill) :
   func(fn),
   regs(fn->getProgram()->getTarget()),
   spill(spill)
{
   prog = func->getProgram();

   // initialize relative degrees array - i takes away from j
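   // relDegree[i][j] = j * ceil(i / j): a neighbour occupying i units can
   // block at most that many allocation units of a node that needs j
   // contiguous ones, e.g. relDegree[4][2] == 4 (two aligned pairs) and
   // relDegree[1][4] == 4 (one unit can spoil an entire aligned quad).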
   for (int i = 1; i <= 16; ++i)
      for (int j = 1; j <= 16; ++j)
         relDegree[i][j] = j * ((i + j - 1) / j);
}

GCRA::~GCRA()
{
   if (nodes)
      delete[] nodes;
}

void
GCRA::checkList(std::list<RIG_Node *>& lst)
{
   GCRA::RIG_Node *prev = NULL;

   for (std::list<RIG_Node *>::iterator it = lst.begin();
        it != lst.end();
        ++it) {
      assert((*it)->getValue()->join == (*it)->getValue());
      if (prev)
         assert(prev->livei.begin() <= (*it)->livei.begin());
      prev = *it;
   }
}

void
GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
{
   if (node->livei.isEmpty())
      return;
   // only the intervals of joined values don't necessarily arrive in order
   std::list<RIG_Node *>::iterator prev, it;
   for (it = list.end(); it != list.begin(); it = prev) {
      prev = it;
      --prev;
      if ((*prev)->livei.begin() <= node->livei.begin())
         break;
   }
   list.insert(it, node);
}

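// Build the interference graph with a linear sweep over live intervals:
// 'values' holds the RIG nodes sorted by interval start, 'active' those
// still live at the current start point; overlapping intervals in the same
// register file become interference edges.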
void
GCRA::buildRIG(ArrayList& insns)
{
   std::list<RIG_Node *> values, active;

   for (std::deque<ValueDef>::iterator it = func->ins.begin();
        it != func->ins.end(); ++it)
      insertOrderedTail(values, getNode(it->get()->asLValue()));

   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
      for (int d = 0; insn->defExists(d); ++d)
         if (insn->getDef(d)->rep() == insn->getDef(d))
            insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
   }
   checkList(values);

   while (!values.empty()) {
      RIG_Node *cur = values.front();

      for (std::list<RIG_Node *>::iterator it = active.begin();
           it != active.end();) {
         RIG_Node *node = *it;

         if (node->livei.end() <= cur->livei.begin()) {
            it = active.erase(it);
         } else {
            if (node->f == cur->f && node->livei.overlaps(cur->livei))
               cur->addInterference(node);
            ++it;
         }
      }
      values.pop_front();
      active.push_back(cur);
   }
}

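// Spill weight is refCount^2 / extent of the live interval, so rarely used,
// long-lived values score lowest and are preferred spill candidates in
// simplify(). Fixed and unspillable values keep the initial infinite weight.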
void
GCRA::calculateSpillWeights()
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      RIG_Node *const n = &nodes[i];
      if (!nodes[i].colors || nodes[i].livei.isEmpty())
         continue;
      if (nodes[i].reg >= 0) {
         // update max reg
         regs.occupy(n->f, n->reg, n->colors);
         continue;
      }
      LValue *val = nodes[i].getValue();

      if (!val->noSpill) {
         int rc = 0;
         for (Value::DefIterator it = val->defs.begin();
              it != val->defs.end();
              ++it)
            rc += (*it)->get()->refCount();

         nodes[i].weight =
            (float)rc * (float)rc / (float)nodes[i].livei.extent();
      }

      if (nodes[i].degree < nodes[i].degreeLimit) {
         int l = 0;
         if (val->reg.size > 4)
            l = 1;
         DLLIST_ADDHEAD(&lo[l], &nodes[i]);
      } else {
         DLLIST_ADDHEAD(&hi, &nodes[i]);
      }
   }
   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      printNodeInfo();
}

void
GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
{
   bool move = b->degree >= b->degreeLimit;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
            a->getValue()->id, a->degree, a->degreeLimit,
            b->getValue()->id, b->degree, b->degreeLimit);

   b->degree -= relDegree[a->colors][b->colors];

   move = move && b->degree < b->degreeLimit;
   if (move && !DLLIST_EMPTY(b)) {
      int l = (b->getValue()->reg.size > 4) ? 1 : 0;
      DLLIST_DEL(b);
      DLLIST_ADDTAIL(&lo[l], b);
   }
}

void
GCRA::simplifyNode(RIG_Node *node)
{
   for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   DLLIST_DEL(node);
   stack.push(node->getValue()->id);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
            node->getValue()->id,
            (node->degree < node->degreeLimit) ? "" : "(spill)");
}

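// Simplify phase: repeatedly remove trivially colourable nodes (degree <
// degreeLimit) from the RIG and push them on the stack; small values are
// drained from lo[0] before wide ones in lo[1]. When only constrained
// nodes remain, the one with the lowest weight / degree score is pushed
// optimistically as a spill candidate, Chaitin-Briggs style.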
void
GCRA::simplify()
{
   for (;;) {
      if (!DLLIST_EMPTY(&lo[0])) {
         do {
            simplifyNode(lo[0].next);
         } while (!DLLIST_EMPTY(&lo[0]));
      } else
      if (!DLLIST_EMPTY(&lo[1])) {
         simplifyNode(lo[1].next);
      } else
      if (!DLLIST_EMPTY(&hi)) {
         RIG_Node *best = hi.next;
         float bestScore = best->weight / (float)best->degree;
         // spill candidate
         for (RIG_Node *it = best->next; it != &hi; it = it->next) {
            float score = it->weight / (float)it->degree;
            if (score < bestScore) {
               best = it;
               bestScore = score;
            }
         }
         if (isinf(bestScore)) {
            ERROR("no viable spill candidates left\n");
            break;
         }
         simplifyNode(best);
      } else {
         break;
      }
   }
}

void
GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
{
   const RIG_Node *intf = RIG_Node::get(ei);

   if (intf->reg < 0)
      return;
   const LValue *vA = node->getValue();
   const LValue *vB = intf->getValue();

   const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);

   if (vA->compound | vB->compound) {
      // NOTE: this only works for >aligned< register tuples !
      for (Value::DefCIterator D = vA->defs.begin(); D != vA->defs.end(); ++D) {
         for (Value::DefCIterator d = vB->defs.begin(); d != vB->defs.end(); ++d) {
            const LValue *vD = (*D)->get()->asLValue();
            const LValue *vd = (*d)->get()->asLValue();

            if (!vD->livei.overlaps(vd->livei)) {
               INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
                        vD->id, vd->id);
               continue;
            }

            uint8_t mask = vD->compound ? vD->compMask : ~0;
            if (vd->compound) {
               assert(vB->compound);
               mask &= vd->compMask & vB->compMask;
            } else {
               mask &= intfMask;
            }

            INFO_DBG(prog->dbgFlags, REG_ALLOC,
                     "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
                     vD->id,
                     vD->compound ? vD->compMask : 0xff,
                     vd->id,
                     vd->compound ? vd->compMask : intfMask,
                     vB->compMask, intf->reg & ~7, mask);
            if (mask)
               regs.occupyMask(node->f, intf->reg & ~7, mask);
         }
      }
   } else {
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "(%%%i) X (%%%i): $r%i + %u\n",
               vA->id, vB->id, intf->reg, intf->colors);
      regs.occupy(node->f, intf->reg, intf->colors);
   }
}

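// Select phase: pop nodes in reverse simplification order; for each node,
// mark the registers of already coloured neighbours as occupied (including
// partial occupancy of compound values), try preferred registers first and
// fall back to the first free range. Nodes that cannot be coloured are
// queued on mustSpill.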
bool
GCRA::selectRegisters()
{
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");

   while (!stack.empty()) {
      RIG_Node *node = &nodes[stack.top()];
      stack.pop();

      regs.reset(node->f);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
               node->getValue()->id, node->colors);

      for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
         checkInterference(node, ei);
      for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
         checkInterference(node, ei);

      if (!node->prefRegs.empty()) {
         for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
              it != node->prefRegs.end();
              ++it) {
            if ((*it)->reg >= 0 &&
                regs.testOccupy(node->f, (*it)->reg, node->colors)) {
               node->reg = (*it)->reg;
               break;
            }
         }
      }
      if (node->reg >= 0)
         continue;
      LValue *lval = node->getValue();
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         regs.print();
      bool ret = regs.assign(node->reg, node->f, node->colors);
      if (ret) {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
         lval->compMask = node->getCompMask();
      } else {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
                  lval->id, lval->reg.size);
         Symbol *slot = NULL;
         if (lval->reg.file == FILE_GPR)
            slot = spill.assignSlot(node->livei, lval->reg.size);
         mustSpill.push_back(ValuePair(lval, slot));
      }
   }
   if (!mustSpill.empty())
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = nodes[i].getValue();
      if (nodes[i].reg >= 0 && nodes[i].colors > 0)
         lval->reg.data.id =
            regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
   }
   return true;
}

bool
GCRA::allocateRegisters(ArrayList& insns)
{
   bool ret;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "allocateRegisters to %u instructions\n", insns.getSize());

   nodeCount = func->allLValues.getSize();
   nodes = new RIG_Node[nodeCount];
   if (!nodes)
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
      if (lval) {
         nodes[i].init(regs, lval);
         RIG.insert(&nodes[i]);
      }
   }

   // coalesce first, we use only 1 RIG node for a group of joined values
   ret = coalesce(insns);
   if (!ret)
      goto out;

   if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      func->printLiveIntervals();

   buildRIG(insns);
   calculateSpillWeights();
   simplify();

   ret = selectRegisters();
   if (!ret) {
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "selectRegisters failed, inserting spill code ...\n");
      regs.reset(FILE_GPR, true);
      spill.run(mustSpill);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();
   } else {
      prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
   }

out:
   cleanup(ret);
   return ret;
}

void
GCRA::cleanup(const bool success)
{
   mustSpill.clear();

   for (ArrayList::Iterator it = func->allLValues.iterator();
        !it.end(); it.next()) {
      LValue *lval = reinterpret_cast<LValue *>(it.get());

      lval->livei.clear();

      lval->compound = 0;
      lval->compMask = 0;

      if (lval->join == lval)
         continue;

      if (success) {
         lval->reg.data.id = lval->join->reg.data.id;
      } else {
         for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
              ++d)
            lval->join->defs.remove(*d);
         lval->join = lval;
      }
   }

   if (success)
      resolveSplitsAndMerges();
   splits.clear(); // avoid duplicate entries on next coalesce pass
   merges.clear();

   delete[] nodes;
   nodes = NULL;
}

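// Find or create a stack slot for a spilled value. Existing slots are
// reused when their recorded occupancy intervals do not overlap the new
// value's live interval, so spills with disjoint lifetimes can share memory.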
Symbol *
SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
{
   SpillSlot slot;
   int32_t offsetBase = stackSize;
   int32_t offset;
   std::list<SpillSlot>::iterator pos = slots.end(), it = slots.begin();

   if (offsetBase % size)
      offsetBase += size - (offsetBase % size);

   slot.sym = NULL;

   for (offset = offsetBase; offset < stackSize; offset += size) {
      const int32_t entryEnd = offset + size;
      while (it != slots.end() && it->offset < offset)
         ++it;
      if (it == slots.end()) // no slots left
         break;
      std::list<SpillSlot>::iterator bgn = it;

      while (it != slots.end() && it->offset < entryEnd) {
         it->occup.print();
         if (it->occup.overlaps(livei))
            break;
         ++it;
      }
      if (it == slots.end() || it->offset >= entryEnd) {
         // fits
         for (; bgn != slots.end() && bgn->offset < entryEnd; ++bgn) {
            bgn->occup.insert(livei);
            if (bgn->size() == size)
               slot.sym = bgn->sym;
         }
         break;
      }
   }
   if (!slot.sym) {
      stackSize = offset + size;
      slot.offset = offset;
      slot.sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
      if (!func->stackPtr)
         offset += func->tlsBase;
      slot.sym->setAddress(NULL, offset);
      slot.sym->reg.size = size;
      slots.insert(pos, slot)->occup.insert(livei);
   }
   return slot.sym;
}

Value *
SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
{
   if (!lval->compound || (lval->compMask & 0x1))
      return base;
   Value *slot = cloneShallow(func, base);

   slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
   slot->reg.size = lval->reg.size;

   return slot;
}

void
SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
{
   const DataType ty = typeOfSize(lval->reg.size);

   slot = offsetSlot(slot, lval);

   Instruction *st;
   if (slot->reg.file == FILE_MEMORY_LOCAL) {
      st = new_Instruction(func, OP_STORE, ty);
      st->setSrc(0, slot);
      st->setSrc(1, lval);
      lval->noSpill = 1;
   } else {
      st = new_Instruction(func, OP_CVT, ty);
      st->setDef(0, slot);
      st->setSrc(0, lval);
   }
   defi->bb->insertAfter(defi, st);
}

LValue *
SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
{
   const DataType ty = typeOfSize(lval->reg.size);

   slot = offsetSlot(slot, lval);
   lval = cloneShallow(func, lval);

   Instruction *ld;
   if (slot->reg.file == FILE_MEMORY_LOCAL) {
      lval->noSpill = 1;
      ld = new_Instruction(func, OP_LOAD, ty);
   } else {
      ld = new_Instruction(func, OP_CVT, ty);
   }
   ld->setDef(0, lval);
   ld->setSrc(0, slot);

   usei->bb->insertBefore(usei, ld);
   return lval;
}


// For each value that is to be spilled, go through all its definitions.
// A value can have multiple definitions if it has been coalesced before.
// For each definition, first go through all its uses and insert an unspill
// instruction before it, then replace the use with the temporary register.
// Unspill can be either a load from memory or simply a move to another
// register file.
1539 // For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
1540 // if we have spilled to a memory location, or simply with the new register.
1541 // No load or conversion instruction should be needed.
bool
SpillCodeInserter::run(const std::list<ValuePair>& lst)
{
   for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
        ++it) {
      LValue *lval = it->first->asLValue();
      Symbol *mem = it->second ? it->second->asSym() : NULL;

      // Keep track of which instructions to delete later. Deleting them
      // inside the loop is unsafe since a single instruction may have
      // multiple destinations that all need to be spilled (like OP_SPLIT).
      unordered_set<Instruction *> to_del;

      for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
           ++d) {
         Value *slot = mem ?
            static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
         Value *tmp = NULL;
         Instruction *last = NULL;

         LValue *dval = (*d)->get()->asLValue();
         Instruction *defi = (*d)->getInsn();

         // Unspill at each use *before* inserting spill instructions,
         // we don't want to have the spill instructions in the use list here.
         while (!dval->uses.empty()) {
            ValueRef *u = *dval->uses.begin();
            Instruction *usei = u->getInsn();
            assert(usei);
            if (usei->isPseudo()) {
               tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
               last = NULL;
            } else
            if (!last || usei != last->next) { // TODO: sort uses
               tmp = unspill(usei, dval, slot);
               last = usei;
            }
            u->set(tmp);
         }

         assert(defi);
         if (defi->isPseudo()) {
            d = lval->defs.erase(d);
            --d;
            if (slot->reg.file == FILE_MEMORY_LOCAL)
               to_del.insert(defi);
            else
               defi->setDef(0, slot);
         } else {
            spill(defi, slot, dval);
         }
      }

      for (unordered_set<Instruction *>::const_iterator it = to_del.begin();
           it != to_del.end(); ++it)
         delete_Instruction(func->getProgram(), *it);
   }

   // TODO: We're not trying to reuse old slots in a potential next iteration.
   // We have to update the slots' livei intervals to be able to do that.
   stackBase = stackSize;
   slots.clear();
   return true;
}

bool
RegAlloc::exec()
{
   for (IteratorRef it = prog->calls.iteratorDFS(false);
        !it->end(); it->next()) {
      func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));

      func->tlsBase = prog->tlsSize;
      if (!execFunc())
         return false;
      prog->tlsSize += func->tlsSize;
   }
   return true;
}

bool
RegAlloc::execFunc()
{
   InsertConstraintsPass insertConstr;
   PhiMovesPass insertPhiMoves;
   ArgumentMovesPass insertArgMoves;
   BuildIntervalsPass buildIntervals;
   SpillCodeInserter insertSpills(func);

   GCRA gcra(func, insertSpills);

   unsigned int i, retries;
   bool ret;

   if (!func->ins.empty()) {
      // Insert a nop at the entry so inputs only used by the first instruction
      // don't count as having an empty live range.
      Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
      BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
   }

   ret = insertConstr.exec(func);
   if (!ret)
      goto out;

   ret = insertPhiMoves.run(func);
   if (!ret)
      goto out;

   ret = insertArgMoves.run(func);
   if (!ret)
      goto out;

   // TODO: need to fix up spill slot usage ranges to support > 1 retry
   for (retries = 0; retries < 3; ++retries) {
      if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
         INFO("Retry: %i\n", retries);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();

      // spilling to registers may add live ranges, need to rebuild everything
      ret = true;
      for (sequence = func->cfg.nextSequence(), i = 0;
           ret && i <= func->loopNestingBound;
           sequence = func->cfg.nextSequence(), ++i)
         ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
      // reset marker
      for (ArrayList::Iterator bi = func->allBBlocks.iterator();
           !bi.end(); bi.next())
         BasicBlock::get(bi)->liveSet.marker = false;
      if (!ret)
         break;
      func->orderInstructions(this->insns);

      ret = buildIntervals.run(func);
      if (!ret)
         break;
      ret = gcra.allocateRegisters(insns);
      if (ret)
         break; // success
   }
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);

   func->tlsSize = insertSpills.getStackSize();
out:
   return ret;
}

// TODO: check if modifying Instruction::join here breaks anything
void
GCRA::resolveSplitsAndMerges()
{
   for (std::list<Instruction *>::iterator it = splits.begin();
        it != splits.end();
        ++it) {
      Instruction *split = *it;
      unsigned int reg = regs.idToBytes(split->getSrc(0));
      for (int d = 0; split->defExists(d); ++d) {
         Value *v = split->getDef(d);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         reg += v->reg.size;
      }
   }
   splits.clear();

   for (std::list<Instruction *>::iterator it = merges.begin();
        it != merges.end();
        ++it) {
      Instruction *merge = *it;
      unsigned int reg = regs.idToBytes(merge->getDef(0));
      for (int s = 0; merge->srcExists(s); ++s) {
         Value *v = merge->getSrc(s);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         // If the value is defined by a phi/union node, we also need to
         // perform the same fixup on that node's sources, since after RA
         // their registers should be identical.
         if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
            Instruction *phi = v->getInsn();
            for (int phis = 0; phi->srcExists(phis); ++phis)
               phi->getSrc(phis)->join = v;
         }
         reg += v->reg.size;
      }
   }
   merges.clear();
}

bool Program::registerAllocation()
{
   RegAlloc ra(this);
   return ra.exec();
}

bool
RegAlloc::InsertConstraintsPass::exec(Function *ir)
{
   constrList.clear();

   bool ret = run(ir, true, true);
   if (ret)
      ret = insertConstraintMoves();
   return ret;
}

// TODO: make part of texture insn
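// Trim a texture instruction's write mask to the components actually used,
// e.g. with tex.mask = 0x7 (xyz) and an unreferenced y result the mask
// becomes 0x5 and the surviving defs are compacted into slots 0 and 1.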
void
RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
{
   Value *def[4];
   int c, k, d;
   uint8_t mask = 0;

   for (d = 0, k = 0, c = 0; c < 4; ++c) {
      if (!(tex->tex.mask & (1 << c)))
         continue;
      if (tex->getDef(k)->refCount()) {
         mask |= 1 << c;
         def[d++] = tex->getDef(k);
      }
      ++k;
   }
   tex->tex.mask = mask;

   for (c = 0; c < d; ++c)
      tex->setDef(c, def[c]);
   for (; c < 4; ++c)
      tex->setDef(c, NULL);
}

bool
RegAlloc::InsertConstraintsPass::detectConflict(Instruction *cst, int s)
{
   Value *v = cst->getSrc(s);

   // current register allocation can't handle it if a value participates in
   // multiple constraints
   for (Value::UseIterator it = v->uses.begin(); it != v->uses.end(); ++it) {
      if (cst != (*it)->getInsn())
         return true;
   }

   // can start at s + 1 because detectConflict is called on all sources
   for (int c = s + 1; cst->srcExists(c); ++c)
      if (v == cst->getSrc(c))
         return true;

   Instruction *defi = v->getInsn();

   return (!defi || defi->constrainedDefs());
}

void
RegAlloc::InsertConstraintsPass::addConstraint(Instruction *i, int s, int n)
{
   Instruction *cst;
   int d;

   // first, look for an existing identical constraint op
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      cst = (*it);
      if (!i->bb->dominatedBy(cst->bb))
         break;
      for (d = 0; d < n; ++d)
         if (cst->getSrc(d) != i->getSrc(d + s))
            break;
      if (d >= n) {
         for (d = 0; d < n; ++d, ++s)
            i->setSrc(s, cst->getDef(d));
         return;
      }
   }
   cst = new_Instruction(func, OP_CONSTRAINT, i->dType);

   for (d = 0; d < n; ++s, ++d) {
      cst->setDef(d, new_LValue(func, FILE_GPR));
      cst->setSrc(d, i->getSrc(s));
      i->setSrc(s, cst->getDef(d));
   }
   i->bb->insertBefore(i, cst);

   constrList.push_back(cst);
}

// Add a dummy use of the pointer source of >= 8 byte loads after the load
// to prevent it from being assigned a register which overlaps the load's
// destination, which would produce random corruptions.
void
RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
{
   Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
   hzd->setSrc(0, src->get());
   i->bb->insertAfter(i, hzd);
}

// b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
{
   uint8_t size = 0;
   int n;
   for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n)
      size += insn->getDef(n)->reg.size;
   if (n < 2)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
   split->setSrc(0, lval);
   for (int d = 0; d < n; ++d) {
      split->setDef(d, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   insn->setDef(0, lval);

   for (int k = 1, d = n; insn->defExists(d); ++d, ++k) {
      insn->setDef(k, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   // carry over predicate if any (mainly for OP_UNION uses)
   split->setPredicate(insn->cc, insn->getPredicate());

   insn->bb->insertAfter(insn, split);
   constrList.push_back(split);
}
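
// Counterpart to condenseDefs() for sources: fuse sources a..b into one
// wide value via OP_MERGE, e.g. b32 sources { %r4 %r5 } of a store become
// a single b64 operand, and any trailing sources are shifted down.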
void
RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getSrc(s)->reg.size;
   if (!size)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Value *save[3];
   insn->takeExtraSources(0, save);

   Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
   merge->setDef(0, lval);
   for (int s = a, i = 0; s <= b; ++s, ++i) {
      merge->setSrc(i, insn->getSrc(s));
      insn->setSrc(s, NULL);
   }
   insn->setSrc(a, lval);

   for (int k = a + 1, s = b + 1; insn->srcExists(s); ++s, ++k) {
      insn->setSrc(k, insn->getSrc(s));
      insn->setSrc(s, NULL);
   }
   insn->bb->insertBefore(insn, merge);

   insn->putExtraSources(0, save);

   constrList.push_back(merge);
}

void
RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
      condenseSrcs(tex, 3, (3 + typeSizeof(tex->dType) / 4) - 1);
   } else
   if (isTextureOp(tex->op)) {
      if (tex->op != OP_TXQ) {
         s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
         if (tex->op == OP_TXD) {
            // Indirect handle belongs in the first arg
            if (tex->tex.rIndirectSrc >= 0)
               s++;
            if (!tex->tex.target.isArray() && tex->tex.useOffsets)
               s++;
         }
         n = tex->srcCount(0xff) - s;
      } else {
         s = tex->srcCount(0xff);
         n = 0;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1) // NOTE: first call modified positions already
         condenseSrcs(tex, 1, n);
   }
}

void
RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
{
   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
      condenseSrcs(tex, 3, (3 + typeSizeof(tex->dType) / 4) - 1);
   } else
   if (isTextureOp(tex->op)) {
      int n = tex->srcCount(0xff, true);
      if (n > 4) {
         condenseSrcs(tex, 0, 3);
         if (n > 5) // NOTE: first call modified positions already
            condenseSrcs(tex, 4 - (4 - 1), n - 1 - (4 - 1));
      } else
      if (n > 1) {
         condenseSrcs(tex, 0, n - 1);
      }
   }
}

void
RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
{
   int n, s;

   textureMask(tex);

   if (tex->op == OP_TXQ) {
      s = tex->srcCount(0xff);
      n = 0;
   } else {
      s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
      if (!tex->tex.target.isArray() &&
          (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
         ++s;
      if (tex->op == OP_TXD && tex->tex.useOffsets)
         ++s;
      n = tex->srcCount(0xff) - s;
      assert(n <= 4);
   }

   if (s > 1)
      condenseSrcs(tex, 0, s - 1);
   if (n > 1) // NOTE: first call modified positions already
      condenseSrcs(tex, 1, n);

   condenseDefs(tex);
}

void
RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
{
   Value *pred = tex->getPredicate();
   if (pred)
      tex->setPredicate(tex->cc, NULL);

   textureMask(tex);

   assert(tex->defExists(0) && tex->srcExists(0));
   // make src and def count match
   int c;
   for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
      if (!tex->srcExists(c))
         tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
      if (!tex->defExists(c))
         tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
   }
   if (pred)
      tex->setPredicate(tex->cc, pred);
   condenseDefs(tex);
   condenseSrcs(tex, 0, c - 1);
}

// Insert constraint markers for instructions whose multiple sources must be
// located in consecutive registers.
bool
RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
{
   TexInstruction *tex;
   Instruction *next;
   int s, size;

   targ = bb->getProgram()->getTarget();

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if ((tex = i->asTex())) {
         switch (targ->getChipset() & ~0xf) {
         case 0x50:
         case 0x80:
         case 0x90:
         case 0xa0:
            texConstraintNV50(tex);
            break;
         case 0xc0:
         case 0xd0:
            texConstraintNVC0(tex);
            break;
         case 0xe0:
         case 0xf0:
         case 0x100:
            texConstraintNVE0(tex);
            break;
         case 0x110:
            texConstraintGM107(tex);
            break;
         default:
            break;
         }
      } else
      if (i->op == OP_EXPORT || i->op == OP_STORE) {
         for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
            assert(i->srcExists(s));
            size -= i->getSrc(s)->reg.size;
         }
         condenseSrcs(i, 1, s - 1);
      } else
      if (i->op == OP_LOAD || i->op == OP_VFETCH) {
         condenseDefs(i);
         if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(0));
         if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(1));
      } else
      if (i->op == OP_UNION ||
          i->op == OP_MERGE ||
          i->op == OP_SPLIT) {
         constrList.push_back(i);
      }
   }
   return true;
}

// Insert extra moves so that, if multiple register constraints on a value are
// in conflict, these conflicts can be resolved.
bool
RegAlloc::InsertConstraintsPass::insertConstraintMoves()
{
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      Instruction *cst = *it;
      Instruction *mov;

      if (cst->op == OP_SPLIT && 0) {
         // spilling splits is annoying, just make sure they're separate
         for (int d = 0; cst->defExists(d); ++d) {
            if (!cst->getDef(d)->refCount())
               continue;
            LValue *lval = new_LValue(func, cst->def(d).getFile());
            const uint8_t size = cst->def(d).getSize();
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setSrc(0, lval);
            mov->setDef(0, cst->getDef(d));
            cst->setDef(d, mov->getSrc(0));
            cst->bb->insertAfter(cst, mov);

            cst->getSrc(0)->asLValue()->noSpill = 1;
            mov->getSrc(0)->asLValue()->noSpill = 1;
         }
      } else
      if (cst->op == OP_MERGE || cst->op == OP_UNION) {
         for (int s = 0; cst->srcExists(s); ++s) {
            const uint8_t size = cst->src(s).getSize();

            if (!cst->getSrc(s)->defs.size()) {
               mov = new_Instruction(func, OP_NOP, typeOfSize(size));
               mov->setDef(0, cst->getSrc(s));
               cst->bb->insertBefore(cst, mov);
               continue;
            }
            assert(cst->getSrc(s)->defs.size() == 1); // still SSA

            Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();
            // catch some cases where we don't really need MOVs
            if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs())
               continue;

            LValue *lval = new_LValue(func, cst->src(s).getFile());
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setDef(0, lval);
            mov->setSrc(0, cst->getSrc(s));
            cst->setSrc(s, mov->getDef(0));
            cst->bb->insertBefore(cst, mov);

            cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help

            if (cst->op == OP_UNION)
               mov->setPredicate(defi->cc, defi->getPredicate());
         }
      }
   }

   return true;
}

} // namespace nv50_ir