nv50/ra: add condenseDef overloads for partial condenses
[mesa.git] / src / gallium / drivers / nouveau / codegen / nv50_ir_ra.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "codegen/nv50_ir.h"
24 #include "codegen/nv50_ir_target.h"
25
26 #include <algorithm>
27 #include <stack>
28 #include <limits>
29 #if __cplusplus >= 201103L
30 #include <unordered_map>
#include <unordered_set>
31 #else
32 #include <tr1/unordered_map>
#include <tr1/unordered_set>
33 #endif
34
35 namespace nv50_ir {
36
37 #if __cplusplus >= 201103L
38 using std::hash;
39 using std::unordered_map;
using std::unordered_set;
40 #else
41 using std::tr1::hash;
42 using std::tr1::unordered_map;
using std::tr1::unordered_set;
43 #endif
44
45 #define MAX_REGISTER_FILE_SIZE 256
46
47 class RegisterSet
48 {
49 public:
50 RegisterSet(const Target *);
51
52 void init(const Target *);
53 void reset(DataFile, bool resetMax = false);
54
55 void periodicMask(DataFile f, uint32_t lock, uint32_t unlock);
56 void intersect(DataFile f, const RegisterSet *);
57
58 bool assign(int32_t& reg, DataFile f, unsigned int size);
59 void release(DataFile f, int32_t reg, unsigned int size);
60 void occupy(DataFile f, int32_t reg, unsigned int size);
61 void occupy(const Value *);
62 void occupyMask(DataFile f, int32_t reg, uint8_t mask);
63 bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
64 bool testOccupy(const Value *);
65 bool testOccupy(DataFile f, int32_t reg, unsigned int size);
66
67 inline int getMaxAssigned(DataFile f) const { return fill[f]; }
68
69 inline unsigned int getFileSize(DataFile f, uint8_t regSize) const
70 {
71 if (restrictedGPR16Range && f == FILE_GPR && regSize == 2)
72 return (last[f] + 1) / 2;
73 return last[f] + 1;
74 }
75
76 inline unsigned int units(DataFile f, unsigned int size) const
77 {
78 return size >> unit[f];
79 }
80 // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
81 inline unsigned int idToBytes(const Value *v) const
82 {
83 return v->reg.data.id * MIN2(v->reg.size, 4);
84 }
85 inline unsigned int idToUnits(const Value *v) const
86 {
87 return units(v->reg.file, idToBytes(v));
88 }
89 inline int bytesToId(Value *v, unsigned int bytes) const
90 {
91 if (v->reg.size < 4)
92 return units(v->reg.file, bytes);
93 return bytes / 4;
94 }
95 inline int unitsToId(DataFile f, int u, uint8_t size) const
96 {
97 if (u < 0)
98 return -1;
99 return (size < 4) ? u : ((u << unit[f]) / 4);
100 }
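// A worked sketch of the id/unit arithmetic above (assuming unit[FILE_GPR]
// == 2, i.e. one allocation unit per 4 bytes, as on nvc0; ids hypothetical):
//   b32 %r3:  idToBytes = 3 * MIN2(4, 4) = 12, idToUnits = 12 >> 2 = 3
//   b64 %r2d: idToBytes = 2 * MIN2(8, 4) = 8,  bytesToId(8) = 8 / 4 = 2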
101
102 void print(DataFile f) const;
103
104 const bool restrictedGPR16Range;
105
106 private:
107 BitSet bits[LAST_REGISTER_FILE + 1];
108
109 int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity
110
111 int last[LAST_REGISTER_FILE + 1];
112 int fill[LAST_REGISTER_FILE + 1];
113 };
114
115 void
116 RegisterSet::reset(DataFile f, bool resetMax)
117 {
118 bits[f].fill(0);
119 if (resetMax)
120 fill[f] = -1;
121 }
122
123 void
124 RegisterSet::init(const Target *targ)
125 {
126 for (unsigned int rf = 0; rf <= FILE_ADDRESS; ++rf) {
127 DataFile f = static_cast<DataFile>(rf);
128 last[rf] = targ->getFileSize(f) - 1;
129 unit[rf] = targ->getFileUnit(f);
130 fill[rf] = -1;
131 assert(last[rf] < MAX_REGISTER_FILE_SIZE);
132 bits[rf].allocate(last[rf] + 1, true);
133 }
134 }
135
136 RegisterSet::RegisterSet(const Target *targ)
137 : restrictedGPR16Range(targ->getChipset() < 0xc0)
138 {
139 init(targ);
140 for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
141 reset(static_cast<DataFile>(i));
142 }
143
144 void
145 RegisterSet::periodicMask(DataFile f, uint32_t lock, uint32_t unlock)
146 {
147 bits[f].periodicMask32(lock, unlock);
148 }
149
150 void
151 RegisterSet::intersect(DataFile f, const RegisterSet *set)
152 {
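// the bit sets store *occupied* units, so intersecting the free registers
// of two sets amounts to OR'ing their occupancy masks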
153 bits[f] |= set->bits[f];
154 }
155
156 void
157 RegisterSet::print(DataFile f) const
158 {
159 INFO("GPR:");
160 bits[f].print();
161 INFO("\n");
162 }
163
164 bool
165 RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size)
166 {
167 reg = bits[f].findFreeRange(size);
168 if (reg < 0)
169 return false;
170 fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
171 return true;
172 }
173
174 bool
175 RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
176 {
177 return bits[f].testRange(reg, size);
178 }
179
180 void
181 RegisterSet::occupy(const Value *v)
182 {
183 occupy(v->reg.file, idToUnits(v), v->reg.size >> unit[v->reg.file]);
184 }
185
186 void
187 RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
188 {
189 bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
190 }
191
192 void
193 RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
194 {
195 bits[f].setRange(reg, size);
196
197 INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);
198
199 fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
200 }
201
202 bool
203 RegisterSet::testOccupy(const Value *v)
204 {
205 return testOccupy(v->reg.file,
206 idToUnits(v), v->reg.size >> unit[v->reg.file]);
207 }
208
209 bool
210 RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
211 {
212 if (isOccupied(f, reg, size))
213 return false;
214 occupy(f, reg, size);
215 return true;
216 }
217
218 void
219 RegisterSet::release(DataFile f, int32_t reg, unsigned int size)
220 {
221 bits[f].clrRange(reg, size);
222
223 INFO_DBG(0, REG_ALLOC, "reg release: %u[%i] %u\n", f, reg, size);
224 }
225
226 class RegAlloc
227 {
228 public:
229 RegAlloc(Program *program) : prog(program), sequence(0) { }
230
231 bool exec();
232 bool execFunc();
233
234 private:
235 class PhiMovesPass : public Pass {
236 private:
237 virtual bool visit(BasicBlock *);
238 inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
239 inline void splitEdges(BasicBlock *b);
240 };
241
242 class ArgumentMovesPass : public Pass {
243 private:
244 virtual bool visit(BasicBlock *);
245 };
246
247 class BuildIntervalsPass : public Pass {
248 private:
249 virtual bool visit(BasicBlock *);
250 void collectLiveValues(BasicBlock *);
251 void addLiveRange(Value *, const BasicBlock *, int end);
252 };
253
254 class InsertConstraintsPass : public Pass {
255 public:
256 bool exec(Function *func);
257 private:
258 virtual bool visit(BasicBlock *);
259
260 void insertConstraintMove(Instruction *, int s);
261 bool insertConstraintMoves();
262
263 void condenseDefs(Instruction *);
264 void condenseDefs(Instruction *, const int first, const int last);
265 void condenseSrcs(Instruction *, const int first, const int last);
266
267 void addHazard(Instruction *i, const ValueRef *src);
268 void textureMask(TexInstruction *);
269 void addConstraint(Instruction *, int s, int n);
270 bool detectConflict(Instruction *, int s);
271
272 // target specific functions, TODO: put in subclass or Target
273 void texConstraintNV50(TexInstruction *);
274 void texConstraintNVC0(TexInstruction *);
275 void texConstraintNVE0(TexInstruction *);
276 void texConstraintGM107(TexInstruction *);
277
278 std::list<Instruction *> constrList;
279
280 const Target *targ;
281 };
282
283 bool buildLiveSets(BasicBlock *);
284
285 private:
286 Program *prog;
287 Function *func;
288
289 // instructions in control flow / chronological order
290 ArrayList insns;
291
292 int sequence; // for manual passes through CFG
293 };
294
295 typedef std::pair<Value *, Value *> ValuePair;
296
297 class SpillCodeInserter
298 {
299 public:
300 SpillCodeInserter(Function *fn) : func(fn), stackSize(0), stackBase(0) { }
301
302 bool run(const std::list<ValuePair>&);
303
304 Symbol *assignSlot(const Interval&, const unsigned int size);
305 Value *offsetSlot(Value *, const LValue *);
306 inline int32_t getStackSize() const { return stackSize; }
307
308 private:
309 Function *func;
310
311 struct SpillSlot
312 {
313 Interval occup;
314 std::list<Value *> residents; // needed to recalculate occup
315 Symbol *sym;
316 int32_t offset;
317 inline uint8_t size() const { return sym->reg.size; }
318 };
319 std::list<SpillSlot> slots;
320 int32_t stackSize;
321 int32_t stackBase;
322
323 LValue *unspill(Instruction *usei, LValue *, Value *slot);
324 void spill(Instruction *defi, Value *slot, LValue *);
325 };
326
327 void
328 RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
329 const BasicBlock *bb,
330 int end)
331 {
332 Instruction *insn = val->getUniqueInsn();
333
334 if (!insn)
335 insn = bb->getFirst();
336
337 assert(bb->getFirst()->serial <= bb->getExit()->serial);
338 assert(bb->getExit()->serial + 1 >= end);
339
340 int begin = insn->serial;
341 if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
342 begin = bb->getEntry()->serial;
343
344 INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
345 val->id, begin, insn->serial, end);
346
347 if (begin != end) // empty ranges are only added as hazards for fixed regs
348 val->livei.extend(begin, end);
349 }
350
351 bool
352 RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
353 {
354 if (b->cfg.incidentCount() <= 1)
355 return false;
356
357 int n = 0;
358 for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
359 if (ei.getType() == Graph::Edge::TREE ||
360 ei.getType() == Graph::Edge::FORWARD)
361 ++n;
362 return (n == 2);
363 }
364
365 struct PhiMapHash {
366 size_t operator()(const std::pair<Instruction *, BasicBlock *>& val) const {
367 return hash<Instruction*>()(val.first) * 31 +
368 hash<BasicBlock*>()(val.second);
369 }
370 };
371
372 typedef unordered_map<
373 std::pair<Instruction *, BasicBlock *>, Value *, PhiMapHash> PhiMap;
374
375 // Critical edges need to be split up so that work can be inserted along
376 // specific edge transitions. Unfortunately manipulating incident edges into a
377 // BB invalidates all the PHI nodes since their sources are implicitly ordered
378 // by incident edge order.
379 //
380 // TODO: Make it so that this is not the case, and have PHI nodes store
381 // pointers to the original BBs.
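// A sketch of what splitEdges() does to a single critical edge, with
// hypothetical blocks (pn is the block created below):
//
//   before: pb -> bb   (pb has another forward edge, bb another predecessor)
//   after:  pb -> pn -> bb
//
// The MOVs later generated for bb's PHIs can then go into pn, executing
// only on the pb->bb transition.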
382 void
383 RegAlloc::PhiMovesPass::splitEdges(BasicBlock *bb)
384 {
385 BasicBlock *pb, *pn;
386 Instruction *phi;
387 Graph::EdgeIterator ei;
388 std::stack<BasicBlock *> stack;
389 int j = 0;
390
391 for (ei = bb->cfg.incident(); !ei.end(); ei.next()) {
392 pb = BasicBlock::get(ei.getNode());
393 assert(pb);
394 if (needNewElseBlock(bb, pb))
395 stack.push(pb);
396 }
397
398 // No critical edges were found, no need to perform any work.
399 if (stack.empty())
400 return;
401
402 // We're about to, potentially, reorder the inbound edges. This means that
403 // we need to hold on to the (phi, bb) -> src mapping, and fix up the phi
404 // nodes after the graph has been modified.
405 PhiMap phis;
406
407 j = 0;
408 for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
409 pb = BasicBlock::get(ei.getNode());
410 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next)
411 phis.insert(std::make_pair(std::make_pair(phi, pb), phi->getSrc(j)));
412 }
413
414 while (!stack.empty()) {
415 pb = stack.top();
416 pn = new BasicBlock(func);
417 stack.pop();
418
419 pb->cfg.detach(&bb->cfg);
420 pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
421 pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);
422
423 assert(pb->getExit()->op != OP_CALL);
424 if (pb->getExit()->asFlow()->target.bb == bb)
425 pb->getExit()->asFlow()->target.bb = pn;
426
427 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
428 PhiMap::iterator it = phis.find(std::make_pair(phi, pb));
429 assert(it != phis.end());
430 phis.insert(std::make_pair(std::make_pair(phi, pn), it->second));
431 phis.erase(it);
432 }
433 }
434
435 // Now go through and fix up all of the phi node sources.
436 j = 0;
437 for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
438 pb = BasicBlock::get(ei.getNode());
439 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
440 PhiMap::const_iterator it = phis.find(std::make_pair(phi, pb));
441 assert(it != phis.end());
442
443 phi->setSrc(j, it->second);
444 }
445 }
446 }
447
448 // For each operand of each PHI in b, generate a new value by inserting a MOV
449 // at the end of the block it is coming from and replace the operand with its
450 // result. This eliminates liveness conflicts and enables us to let values be
451 // copied to the right register if such a conflict exists nonetheless.
452 //
453 // These MOVs are also crucial in making sure the live intervals of phi sources
454 // are extended until the end of the loop, since they are not included in the
455 // live-in sets.
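// A sketch of the rewrite below, with hypothetical values %a/%b coming in
// from BB:1 and BB:2:
//   before: BB:3: phi %d %a %b
//   after:  BB:1: mov %ta %a   BB:2: mov %tb %b   BB:3: phi %d %ta %tb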
456 bool
457 RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
458 {
459 Instruction *phi, *mov;
460
461 splitEdges(bb);
462
463 // insert MOVs (phi->src(j) should stem from j-th in-BB)
464 int j = 0;
465 for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
466 BasicBlock *pb = BasicBlock::get(ei.getNode());
467 if (!pb->isTerminated())
468 pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));
469
470 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
471 LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
472 mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
473
474 mov->setSrc(0, phi->getSrc(j));
475 mov->setDef(0, tmp);
476 phi->setSrc(j, tmp);
477
478 pb->insertBefore(pb->getExit(), mov);
479 }
480 ++j;
481 }
482
483 return true;
484 }
485
486 bool
487 RegAlloc::ArgumentMovesPass::visit(BasicBlock *bb)
488 {
489 // Bind function call inputs/outputs to the same physical register
490 // the callee uses, inserting moves as appropriate in case a conflict
491 // arises.
492 for (Instruction *i = bb->getEntry(); i; i = i->next) {
493 FlowInstruction *cal = i->asFlow();
494 // TODO: Handle indirect calls.
495 // Right now they should only be generated for builtins.
496 if (!cal || cal->op != OP_CALL || cal->builtin || cal->indirect)
497 continue;
498 RegisterSet clobberSet(prog->getTarget());
499
500 // Bind input values.
501 for (int s = cal->indirect ? 1 : 0; cal->srcExists(s); ++s) {
502 const int t = cal->indirect ? (s - 1) : s;
503 LValue *tmp = new_LValue(func, cal->getSrc(s)->asLValue());
504 tmp->reg.data.id = cal->target.fn->ins[t].rep()->reg.data.id;
505
506 Instruction *mov =
507 new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
508 mov->setDef(0, tmp);
509 mov->setSrc(0, cal->getSrc(s));
510 cal->setSrc(s, tmp);
511
512 bb->insertBefore(cal, mov);
513 }
514
515 // Bind output values.
516 for (int d = 0; cal->defExists(d); ++d) {
517 LValue *tmp = new_LValue(func, cal->getDef(d)->asLValue());
518 tmp->reg.data.id = cal->target.fn->outs[d].rep()->reg.data.id;
519
520 Instruction *mov =
521 new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
522 mov->setSrc(0, tmp);
523 mov->setDef(0, cal->getDef(d));
524 cal->setDef(d, tmp);
525
526 bb->insertAfter(cal, mov);
527 clobberSet.occupy(tmp);
528 }
529
530 // Bind clobbered values.
531 for (std::deque<Value *>::iterator it = cal->target.fn->clobbers.begin();
532 it != cal->target.fn->clobbers.end();
533 ++it) {
534 if (clobberSet.testOccupy(*it)) {
535 Value *tmp = new_LValue(func, (*it)->asLValue());
536 tmp->reg.data.id = (*it)->reg.data.id;
537 cal->setDef(cal->defCount(), tmp);
538 }
539 }
540 }
541
542 // Update the clobber set of the function.
543 if (BasicBlock::get(func->cfgExit) == bb) {
544 func->buildDefSets();
545 for (unsigned int i = 0; i < bb->defSet.getSize(); ++i)
546 if (bb->defSet.test(i))
547 func->clobbers.push_back(func->getLValue(i));
548 }
549
550 return true;
551 }
552
553 // Build the set of live-in variables of bb.
554 bool
555 RegAlloc::buildLiveSets(BasicBlock *bb)
556 {
557 Function *f = bb->getFunction();
558 BasicBlock *bn;
559 Instruction *i;
560 unsigned int s, d;
561
562 INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());
563
564 bb->liveSet.allocate(func->allLValues.getSize(), false);
565
566 int n = 0;
567 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
568 bn = BasicBlock::get(ei.getNode());
569 if (bn == bb)
570 continue;
571 if (bn->cfg.visit(sequence))
572 if (!buildLiveSets(bn))
573 return false;
574 if (n++ || bb->liveSet.marker)
575 bb->liveSet |= bn->liveSet;
576 else
577 bb->liveSet = bn->liveSet;
578 }
579 if (!n && !bb->liveSet.marker)
580 bb->liveSet.fill(0);
581 bb->liveSet.marker = true;
582
583 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
584 INFO("BB:%i live set of out blocks:\n", bb->getId());
585 bb->liveSet.print();
586 }
587
588 // if (!bb->getEntry())
589 // return true;
590
591 if (bb == BasicBlock::get(f->cfgExit)) {
592 for (std::deque<ValueRef>::iterator it = f->outs.begin();
593 it != f->outs.end(); ++it) {
594 assert(it->get()->asLValue());
595 bb->liveSet.set(it->get()->id);
596 }
597 }
598
599 for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
600 for (d = 0; i->defExists(d); ++d)
601 bb->liveSet.clr(i->getDef(d)->id);
602 for (s = 0; i->srcExists(s); ++s)
603 if (i->getSrc(s)->asLValue())
604 bb->liveSet.set(i->getSrc(s)->id);
605 }
606 for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
607 bb->liveSet.clr(i->getDef(0)->id);
608
609 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
610 INFO("BB:%i live set after propagation:\n", bb->getId());
611 bb->liveSet.print();
612 }
613
614 return true;
615 }
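// E.g. for a block whose only instruction is "add %a %b %c", the backwards
// walk above clears %a (defined here) from liveSet and sets %b and %c,
// making them live-in.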
616
617 void
618 RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
619 {
620 BasicBlock *bbA = NULL, *bbB = NULL;
621
622 if (bb->cfg.outgoingCount()) {
623 // trickery to save a loop of OR'ing liveSets
624 // aliasing works fine with BitSet::setOr
625 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
626 if (ei.getType() == Graph::Edge::DUMMY)
627 continue;
628 if (bbA) {
629 bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
630 bbA = bb;
631 } else {
632 bbA = bbB;
633 }
634 bbB = BasicBlock::get(ei.getNode());
635 }
636 bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
637 } else
638 if (bb->cfg.incidentCount()) {
639 bb->liveSet.fill(0);
640 }
641 }
642
643 bool
644 RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
645 {
646 collectLiveValues(bb);
647
648 INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());
649
650 // go through out blocks and remove phi sources that do not originate
651 // in the current block from the live set
652 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
653 BasicBlock *out = BasicBlock::get(ei.getNode());
654
655 for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
656 bb->liveSet.clr(i->getDef(0)->id);
657
658 for (int s = 0; i->srcExists(s); ++s) {
659 assert(i->src(s).getInsn());
660 if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
661 bb->liveSet.set(i->getSrc(s)->id);
662 else
663 bb->liveSet.clr(i->getSrc(s)->id);
664 }
665 }
666 }
667
668 // remaining live-outs are live until end
669 if (bb->getExit()) {
670 for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
671 if (bb->liveSet.test(j))
672 addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
673 }
674
675 for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
676 for (int d = 0; i->defExists(d); ++d) {
677 bb->liveSet.clr(i->getDef(d)->id);
678 if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
679 i->getDef(d)->livei.extend(i->serial, i->serial);
680 }
681
682 for (int s = 0; i->srcExists(s); ++s) {
683 if (!i->getSrc(s)->asLValue())
684 continue;
685 if (!bb->liveSet.test(i->getSrc(s)->id)) {
686 bb->liveSet.set(i->getSrc(s)->id);
687 addLiveRange(i->getSrc(s), bb, i->serial);
688 }
689 }
690 }
691
692 if (bb == BasicBlock::get(func->cfg.getRoot())) {
693 for (std::deque<ValueDef>::iterator it = func->ins.begin();
694 it != func->ins.end(); ++it) {
695 if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
696 it->get()->livei.extend(0, 1);
697 }
698 }
699
700 return true;
701 }
702
703
704 #define JOIN_MASK_PHI (1 << 0)
705 #define JOIN_MASK_UNION (1 << 1)
706 #define JOIN_MASK_MOV (1 << 2)
707 #define JOIN_MASK_TEX (1 << 3)
708
709 class GCRA
710 {
711 public:
712 GCRA(Function *, SpillCodeInserter&);
713 ~GCRA();
714
715 bool allocateRegisters(ArrayList& insns);
716
717 void printNodeInfo() const;
718
719 private:
720 class RIG_Node : public Graph::Node
721 {
722 public:
723 RIG_Node();
724
725 void init(const RegisterSet&, LValue *);
726
727 void addInterference(RIG_Node *);
728 void addRegPreference(RIG_Node *);
729
730 inline LValue *getValue() const
731 {
732 return reinterpret_cast<LValue *>(data);
733 }
734 inline void setValue(LValue *lval) { data = lval; }
735
736 inline uint8_t getCompMask() const
737 {
738 return ((1 << colors) - 1) << (reg & 7);
739 }
740
741 static inline RIG_Node *get(const Graph::EdgeIterator& ei)
742 {
743 return static_cast<RIG_Node *>(ei.getNode());
744 }
745
746 public:
747 uint32_t degree;
748 uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
749 uint16_t colors;
750
751 DataFile f;
752 int32_t reg;
753
754 float weight;
755
756 // list pointers for simplify() phase
757 RIG_Node *next;
758 RIG_Node *prev;
759
760 // union of the live intervals of all coalesced values (we want to retain
761 // the separate intervals for testing interference of compound values)
762 Interval livei;
763
764 std::list<RIG_Node *> prefRegs;
765 };
766
767 private:
768 inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }
769
770 void buildRIG(ArrayList&);
771 bool coalesce(ArrayList&);
772 bool doCoalesce(ArrayList&, unsigned int mask);
773 void calculateSpillWeights();
774 bool simplify();
775 bool selectRegisters();
776 void cleanup(const bool success);
777
778 void simplifyEdge(RIG_Node *, RIG_Node *);
779 void simplifyNode(RIG_Node *);
780
781 bool coalesceValues(Value *, Value *, bool force);
782 void resolveSplitsAndMerges();
783 void makeCompound(Instruction *, bool isSplit);
784
785 inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);
786
787 inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
788 void checkList(std::list<RIG_Node *>&);
789
790 private:
791 std::stack<uint32_t> stack;
792
793 // list headers for simplify() phase
794 RIG_Node lo[2];
795 RIG_Node hi;
796
797 Graph RIG;
798 RIG_Node *nodes;
799 unsigned int nodeCount;
800
801 Function *func;
802 Program *prog;
803
804 static uint8_t relDegree[17][17];
805
806 RegisterSet regs;
807
808 // need to fix up register ids for participants of OP_MERGE/SPLIT
809 std::list<Instruction *> merges;
810 std::list<Instruction *> splits;
811
812 SpillCodeInserter& spill;
813 std::list<ValuePair> mustSpill;
814 };
815
816 uint8_t GCRA::relDegree[17][17];
817
818 GCRA::RIG_Node::RIG_Node() : Node(NULL), next(this), prev(this)
819 {
820 colors = 0;
821 }
822
823 void
824 GCRA::printNodeInfo() const
825 {
826 for (unsigned int i = 0; i < nodeCount; ++i) {
827 if (!nodes[i].colors)
828 continue;
829 INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
830 i,
831 nodes[i].f,nodes[i].reg,nodes[i].colors,
832 nodes[i].weight,
833 nodes[i].degree, nodes[i].degreeLimit);
834
835 for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
836 INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
837 for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
838 INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
839 INFO("\n");
840 }
841 }
842
843 static bool
844 isShortRegOp(Instruction *insn)
845 {
846 // Immediates are always in src1. Every other situation can be resolved by
847 // using a long encoding.
848 return insn->srcExists(1) && insn->src(1).getFile() == FILE_IMMEDIATE;
849 }
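// E.g. an op with an immediate in src1 ("add $r0 $r1 0x1", hypothetical)
// cannot fall back to a long encoding, so its register operands must stay
// below $r64; isShortRegVal() users below halve the usable file size for
// such values.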
850
851 // Check if this LValue is ever used in an instruction that can't be encoded
852 // with long registers (i.e. > r63)
853 static bool
854 isShortRegVal(LValue *lval)
855 {
856 if (lval->getInsn() == NULL)
857 return false;
858 for (Value::DefCIterator def = lval->defs.begin();
859 def != lval->defs.end(); ++def)
860 if (isShortRegOp((*def)->getInsn()))
861 return true;
862 for (Value::UseCIterator use = lval->uses.begin();
863 use != lval->uses.end(); ++use)
864 if (isShortRegOp((*use)->getInsn()))
865 return true;
866 return false;
867 }
868
869 void
870 GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
871 {
872 setValue(lval);
873 if (lval->reg.data.id >= 0)
874 lval->noSpill = lval->fixedReg = 1;
875
876 colors = regs.units(lval->reg.file, lval->reg.size);
877 f = lval->reg.file;
878 reg = -1;
879 if (lval->reg.data.id >= 0)
880 reg = regs.idToUnits(lval);
881
882 weight = std::numeric_limits<float>::infinity();
883 degree = 0;
884 int size = regs.getFileSize(f, lval->reg.size);
885 // On nv50, we lose a bit of gpr encoding when there's an embedded
886 // immediate.
887 if (regs.restrictedGPR16Range && f == FILE_GPR && isShortRegVal(lval))
888 size /= 2;
889 degreeLimit = size;
890 degreeLimit -= relDegree[1][colors] - 1;
891
892 livei.insert(lval->livei);
893 }
894
895 bool
896 GCRA::coalesceValues(Value *dst, Value *src, bool force)
897 {
898 LValue *rep = dst->join->asLValue();
899 LValue *val = src->join->asLValue();
900
901 if (!force && val->reg.data.id >= 0) {
902 rep = src->join->asLValue();
903 val = dst->join->asLValue();
904 }
905 RIG_Node *nRep = &nodes[rep->id];
906 RIG_Node *nVal = &nodes[val->id];
907
908 if (src->reg.file != dst->reg.file) {
909 if (!force)
910 return false;
911 WARN("forced coalescing of values in different files !\n");
912 }
913 if (!force && dst->reg.size != src->reg.size)
914 return false;
915
916 if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
917 if (force) {
918 if (val->reg.data.id >= 0)
919 WARN("forced coalescing of values in different fixed regs !\n");
920 } else {
921 if (val->reg.data.id >= 0)
922 return false;
923 // make sure that there is no overlap with the fixed register of rep
924 for (ArrayList::Iterator it = func->allLValues.iterator();
925 !it.end(); it.next()) {
926 Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
927 assert(reg);
928 if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
929 return false;
930 }
931 }
932 }
933
934 if (!force && nRep->livei.overlaps(nVal->livei))
935 return false;
936
937 INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
938 rep->id, rep->reg.data.id, val->id);
939
940 // set join pointer of all values joined with val
941 for (Value::DefIterator def = val->defs.begin(); def != val->defs.end();
942 ++def)
943 (*def)->get()->join = rep;
944 assert(rep->join == rep && val->join == rep);
945
946 // add val's definitions to rep and extend the live interval of its RIG node
947 rep->defs.insert(rep->defs.end(), val->defs.begin(), val->defs.end());
948 nRep->livei.unify(nVal->livei);
949 return true;
950 }
951
952 bool
953 GCRA::coalesce(ArrayList& insns)
954 {
955 bool ret = doCoalesce(insns, JOIN_MASK_PHI);
956 if (!ret)
957 return false;
958 switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
959 case 0x50:
960 case 0x80:
961 case 0x90:
962 case 0xa0:
963 ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
964 break;
965 case 0xc0:
966 case 0xd0:
967 case 0xe0:
968 case 0xf0:
969 case 0x100:
970 case 0x110:
971 case 0x120:
972 case 0x130:
973 ret = doCoalesce(insns, JOIN_MASK_UNION);
974 break;
975 default:
976 break;
977 }
978 if (!ret)
979 return false;
980 return doCoalesce(insns, JOIN_MASK_MOV);
981 }
982
983 static inline uint8_t makeCompMask(int compSize, int base, int size)
984 {
985 uint8_t m = ((1 << size) - 1) << base;
986
987 switch (compSize) {
988 case 1:
989 return 0xff;
990 case 2:
991 m |= (m << 2);
992 return (m << 4) | m;
993 case 3:
994 case 4:
995 return (m << 4) | m;
996 default:
997 assert(compSize <= 8);
998 return m;
999 }
1000 }
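// Worked examples (straight bit arithmetic):
//   makeCompMask(2, 0, 1): m = 0x01, m |= m << 2 -> 0x05, result 0x55
//   makeCompMask(2, 1, 1): m = 0x02, m |= m << 2 -> 0x0a, result 0xaa
//   makeCompMask(4, 2, 2): m = 0x0c, result (m << 4) | m = 0xcc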
1001
1002 // Used when coalescing moves. The non-compound value will become one, e.g.:
1003 // mov b32 $r0 $r2 / merge b64 $r0d { $r0 $r1 }
1004 // split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
1005 static inline void copyCompound(Value *dst, Value *src)
1006 {
1007 LValue *ldst = dst->asLValue();
1008 LValue *lsrc = src->asLValue();
1009
1010 if (ldst->compound && !lsrc->compound) {
1011 LValue *swap = lsrc;
1012 lsrc = ldst;
1013 ldst = swap;
1014 }
1015
1016 ldst->compound = lsrc->compound;
1017 ldst->compMask = lsrc->compMask;
1018 }
1019
1020 void
1021 GCRA::makeCompound(Instruction *insn, bool split)
1022 {
1023 LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();
1024
1025 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
1026 INFO("makeCompound(split = %i): ", split);
1027 insn->print();
1028 }
1029
1030 const unsigned int size = getNode(rep)->colors;
1031 unsigned int base = 0;
1032
1033 if (!rep->compound)
1034 rep->compMask = 0xff;
1035 rep->compound = 1;
1036
1037 for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
1038 LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();
1039
1040 val->compound = 1;
1041 if (!val->compMask)
1042 val->compMask = 0xff;
1043 val->compMask &= makeCompMask(size, base, getNode(val)->colors);
1044 assert(val->compMask);
1045
1046 INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
1047 rep->id, rep->compMask, val->id, val->compMask);
1048
1049 base += getNode(val)->colors;
1050 }
1051 assert(base == size);
1052 }
1053
1054 bool
1055 GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
1056 {
1057 int c, n;
1058
1059 for (n = 0; n < insns.getSize(); ++n) {
1060 Instruction *i;
1061 Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));
1062
1063 switch (insn->op) {
1064 case OP_PHI:
1065 if (!(mask & JOIN_MASK_PHI))
1066 break;
1067 for (c = 0; insn->srcExists(c); ++c)
1068 if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
1069 // this is bad
1070 ERROR("failed to coalesce phi operands\n");
1071 return false;
1072 }
1073 break;
1074 case OP_UNION:
1075 case OP_MERGE:
1076 if (!(mask & JOIN_MASK_UNION))
1077 break;
1078 for (c = 0; insn->srcExists(c); ++c)
1079 coalesceValues(insn->getDef(0), insn->getSrc(c), true);
1080 if (insn->op == OP_MERGE) {
1081 merges.push_back(insn);
1082 if (insn->srcExists(1))
1083 makeCompound(insn, false);
1084 }
1085 break;
1086 case OP_SPLIT:
1087 if (!(mask & JOIN_MASK_UNION))
1088 break;
1089 splits.push_back(insn);
1090 for (c = 0; insn->defExists(c); ++c)
1091 coalesceValues(insn->getSrc(0), insn->getDef(c), true);
1092 makeCompound(insn, true);
1093 break;
1094 case OP_MOV:
1095 if (!(mask & JOIN_MASK_MOV))
1096 break;
1097 i = NULL;
1098 if (!insn->getDef(0)->uses.empty())
1099 i = (*insn->getDef(0)->uses.begin())->getInsn();
1100 // if this is a constraint-move there will only be a single use
1101 if (i && i->op == OP_MERGE) // do we really still need this?
1102 break;
1103 i = insn->getSrc(0)->getUniqueInsn();
1104 if (i && !i->constrainedDefs()) {
1105 if (coalesceValues(insn->getDef(0), insn->getSrc(0), false))
1106 copyCompound(insn->getSrc(0), insn->getDef(0));
1107 }
1108 break;
1109 case OP_TEX:
1110 case OP_TXB:
1111 case OP_TXL:
1112 case OP_TXF:
1113 case OP_TXQ:
1114 case OP_TXD:
1115 case OP_TXG:
1116 case OP_TXLQ:
1117 case OP_TEXCSAA:
1118 case OP_TEXPREP:
1119 if (!(mask & JOIN_MASK_TEX))
1120 break;
1121 for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
1122 coalesceValues(insn->getDef(c), insn->getSrc(c), true);
1123 break;
1124 default:
1125 break;
1126 }
1127 }
1128 return true;
1129 }
1130
1131 void
1132 GCRA::RIG_Node::addInterference(RIG_Node *node)
1133 {
1134 this->degree += relDegree[node->colors][colors];
1135 node->degree += relDegree[colors][node->colors];
1136
1137 this->attach(node, Graph::Edge::CROSS);
1138 }
1139
1140 void
1141 GCRA::RIG_Node::addRegPreference(RIG_Node *node)
1142 {
1143 prefRegs.push_back(node);
1144 }
1145
1146 GCRA::GCRA(Function *fn, SpillCodeInserter& spill) :
1147 func(fn),
1148 regs(fn->getProgram()->getTarget()),
1149 spill(spill)
1150 {
1151 prog = func->getProgram();
1152
1153 // initialize relative degrees array - i takes away from j
1154 for (int i = 1; i <= 16; ++i)
1155 for (int j = 1; j <= 16; ++j)
1156 relDegree[i][j] = j * ((i + j - 1) / j);
1157 }
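// Worked examples of relDegree[i][j] = j * ceil(i / j), i.e. how many of a
// j-unit node's slots an i-unit neighbour takes away:
//   relDegree[1][2] = 2 * 1 = 2   (a b32 neighbour blocks a whole b64 slot)
//   relDegree[2][1] = 1 * 2 = 2   (a b64 neighbour blocks two b32 slots)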
1158
1159 GCRA::~GCRA()
1160 {
1161 if (nodes)
1162 delete[] nodes;
1163 }
1164
1165 void
1166 GCRA::checkList(std::list<RIG_Node *>& lst)
1167 {
1168 GCRA::RIG_Node *prev = NULL;
1169
1170 for (std::list<RIG_Node *>::iterator it = lst.begin();
1171 it != lst.end();
1172 ++it) {
1173 assert((*it)->getValue()->join == (*it)->getValue());
1174 if (prev)
1175 assert(prev->livei.begin() <= (*it)->livei.begin());
1176 prev = *it;
1177 }
1178 }
1179
1180 void
1181 GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
1182 {
1183 if (node->livei.isEmpty())
1184 return;
1185 // only the intervals of joined values don't necessarily arrive in order
1186 std::list<RIG_Node *>::iterator prev, it;
1187 for (it = list.end(); it != list.begin(); it = prev) {
1188 prev = it;
1189 --prev;
1190 if ((*prev)->livei.begin() <= node->livei.begin())
1191 break;
1192 }
1193 list.insert(it, node);
1194 }
1195
1196 void
1197 GCRA::buildRIG(ArrayList& insns)
1198 {
1199 std::list<RIG_Node *> values, active;
1200
1201 for (std::deque<ValueDef>::iterator it = func->ins.begin();
1202 it != func->ins.end(); ++it)
1203 insertOrderedTail(values, getNode(it->get()->asLValue()));
1204
1205 for (int i = 0; i < insns.getSize(); ++i) {
1206 Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
1207 for (int d = 0; insn->defExists(d); ++d)
1208 if (insn->getDef(d)->rep() == insn->getDef(d))
1209 insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
1210 }
1211 checkList(values);
1212
1213 while (!values.empty()) {
1214 RIG_Node *cur = values.front();
1215
1216 for (std::list<RIG_Node *>::iterator it = active.begin();
1217 it != active.end();) {
1218 RIG_Node *node = *it;
1219
1220 if (node->livei.end() <= cur->livei.begin()) {
1221 it = active.erase(it);
1222 } else {
1223 if (node->f == cur->f && node->livei.overlaps(cur->livei))
1224 cur->addInterference(node);
1225 ++it;
1226 }
1227 }
1228 values.pop_front();
1229 active.push_back(cur);
1230 }
1231 }
1232
1233 void
1234 GCRA::calculateSpillWeights()
1235 {
1236 for (unsigned int i = 0; i < nodeCount; ++i) {
1237 RIG_Node *const n = &nodes[i];
1238 if (!nodes[i].colors || nodes[i].livei.isEmpty())
1239 continue;
1240 if (nodes[i].reg >= 0) {
1241 // update max reg
1242 regs.occupy(n->f, n->reg, n->colors);
1243 continue;
1244 }
1245 LValue *val = nodes[i].getValue();
1246
1247 if (!val->noSpill) {
1248 int rc = 0;
1249 for (Value::DefIterator it = val->defs.begin();
1250 it != val->defs.end();
1251 ++it)
1252 rc += (*it)->get()->refCount();
1253
1254 nodes[i].weight =
1255 (float)rc * (float)rc / (float)nodes[i].livei.extent();
1256 }
1257
1258 if (nodes[i].degree < nodes[i].degreeLimit) {
1259 int l = 0;
1260 if (val->reg.size > 4)
1261 l = 1;
1262 DLLIST_ADDHEAD(&lo[l], &nodes[i]);
1263 } else {
1264 DLLIST_ADDHEAD(&hi, &nodes[i]);
1265 }
1266 }
1267 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1268 printNodeInfo();
1269 }
1270
1271 void
1272 GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
1273 {
1274 bool move = b->degree >= b->degreeLimit;
1275
1276 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1277 "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
1278 a->getValue()->id, a->degree, a->degreeLimit,
1279 b->getValue()->id, b->degree, b->degreeLimit);
1280
1281 b->degree -= relDegree[a->colors][b->colors];
1282
1283 move = move && b->degree < b->degreeLimit;
1284 if (move && !DLLIST_EMPTY(b)) {
1285 int l = (b->getValue()->reg.size > 4) ? 1 : 0;
1286 DLLIST_DEL(b);
1287 DLLIST_ADDTAIL(&lo[l], b);
1288 }
1289 }
1290
1291 void
1292 GCRA::simplifyNode(RIG_Node *node)
1293 {
1294 for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
1295 simplifyEdge(node, RIG_Node::get(ei));
1296
1297 for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
1298 simplifyEdge(node, RIG_Node::get(ei));
1299
1300 DLLIST_DEL(node);
1301 stack.push(node->getValue()->id);
1302
1303 INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
1304 node->getValue()->id,
1305 (node->degree < node->degreeLimit) ? "" : "(spill)");
1306 }
1307
1308 bool
1309 GCRA::simplify()
1310 {
1311 for (;;) {
1312 if (!DLLIST_EMPTY(&lo[0])) {
1313 do {
1314 simplifyNode(lo[0].next);
1315 } while (!DLLIST_EMPTY(&lo[0]));
1316 } else
1317 if (!DLLIST_EMPTY(&lo[1])) {
1318 simplifyNode(lo[1].next);
1319 } else
1320 if (!DLLIST_EMPTY(&hi)) {
1321 RIG_Node *best = hi.next;
1322 float bestScore = best->weight / (float)best->degree;
1323 // spill candidate
1324 for (RIG_Node *it = best->next; it != &hi; it = it->next) {
1325 float score = it->weight / (float)it->degree;
1326 if (score < bestScore) {
1327 best = it;
1328 bestScore = score;
1329 }
1330 }
1331 if (isinf(bestScore)) {
1332 ERROR("no viable spill candidates left\n");
1333 return false;
1334 }
1335 simplifyNode(best);
1336 } else {
1337 return true;
1338 }
1339 }
1340 }
1341
1342 void
1343 GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
1344 {
1345 const RIG_Node *intf = RIG_Node::get(ei);
1346
1347 if (intf->reg < 0)
1348 return;
1349 const LValue *vA = node->getValue();
1350 const LValue *vB = intf->getValue();
1351
1352 const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);
1353
1354 if (vA->compound | vB->compound) {
1355 // NOTE: this only works for >aligned< register tuples !
1356 for (Value::DefCIterator D = vA->defs.begin(); D != vA->defs.end(); ++D) {
1357 for (Value::DefCIterator d = vB->defs.begin(); d != vB->defs.end(); ++d) {
1358 const LValue *vD = (*D)->get()->asLValue();
1359 const LValue *vd = (*d)->get()->asLValue();
1360
1361 if (!vD->livei.overlaps(vd->livei)) {
1362 INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
1363 vD->id, vd->id);
1364 continue;
1365 }
1366
1367 uint8_t mask = vD->compound ? vD->compMask : ~0;
1368 if (vd->compound) {
1369 assert(vB->compound);
1370 mask &= vd->compMask & vB->compMask;
1371 } else {
1372 mask &= intfMask;
1373 }
1374
1375 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1376 "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
1377 vD->id,
1378 vD->compound ? vD->compMask : 0xff,
1379 vd->id,
1380 vd->compound ? vd->compMask : intfMask,
1381 vB->compMask, intf->reg & ~7, mask);
1382 if (mask)
1383 regs.occupyMask(node->f, intf->reg & ~7, mask);
1384 }
1385 }
1386 } else {
1387 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1388 "(%%%i) X (%%%i): $r%i + %u\n",
1389 vA->id, vB->id, intf->reg, intf->colors);
1390 regs.occupy(node->f, intf->reg, intf->colors);
1391 }
1392 }
1393
1394 bool
1395 GCRA::selectRegisters()
1396 {
1397 INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");
1398
1399 while (!stack.empty()) {
1400 RIG_Node *node = &nodes[stack.top()];
1401 stack.pop();
1402
1403 regs.reset(node->f);
1404
1405 INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
1406 node->getValue()->id, node->colors);
1407
1408 for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
1409 checkInterference(node, ei);
1410 for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
1411 checkInterference(node, ei);
1412
1413 if (!node->prefRegs.empty()) {
1414 for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
1415 it != node->prefRegs.end();
1416 ++it) {
1417 if ((*it)->reg >= 0 &&
1418 regs.testOccupy(node->f, (*it)->reg, node->colors)) {
1419 node->reg = (*it)->reg;
1420 break;
1421 }
1422 }
1423 }
1424 if (node->reg >= 0)
1425 continue;
1426 LValue *lval = node->getValue();
1427 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1428 regs.print(node->f);
1429 bool ret = regs.assign(node->reg, node->f, node->colors);
1430 if (ret) {
1431 INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
1432 lval->compMask = node->getCompMask();
1433 } else {
1434 INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
1435 lval->id, lval->reg.size);
1436 Symbol *slot = NULL;
1437 if (lval->reg.file == FILE_GPR)
1438 slot = spill.assignSlot(node->livei, lval->reg.size);
1439 mustSpill.push_back(ValuePair(lval, slot));
1440 }
1441 }
1442 if (!mustSpill.empty())
1443 return false;
1444 for (unsigned int i = 0; i < nodeCount; ++i) {
1445 LValue *lval = nodes[i].getValue();
1446 if (nodes[i].reg >= 0 && nodes[i].colors > 0)
1447 lval->reg.data.id =
1448 regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
1449 }
1450 return true;
1451 }
1452
1453 bool
1454 GCRA::allocateRegisters(ArrayList& insns)
1455 {
1456 bool ret;
1457
1458 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1459 "allocateRegisters to %u instructions\n", insns.getSize());
1460
1461 nodeCount = func->allLValues.getSize();
1462 nodes = new RIG_Node[nodeCount];
1463 if (!nodes)
1464 return false;
1465 for (unsigned int i = 0; i < nodeCount; ++i) {
1466 LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
1467 if (lval) {
1468 nodes[i].init(regs, lval);
1469 RIG.insert(&nodes[i]);
1470
1471 if (lval->inFile(FILE_GPR) && lval->getInsn() != NULL) {
1472 Instruction *insn = lval->getInsn();
1473 if (insn->op != OP_MAD && insn->op != OP_FMA && insn->op != OP_SAD)
1474 continue;
1475 // For both of the cases below, we only want to add the preference
1476 // if all arguments are in registers.
1477 if (insn->src(0).getFile() != FILE_GPR ||
1478 insn->src(1).getFile() != FILE_GPR ||
1479 insn->src(2).getFile() != FILE_GPR)
1480 continue;
1481 if (prog->getTarget()->getChipset() < 0xc0) {
1482 // Outputting a flag is not supported with short encodings nor
1483 // with immediate arguments.
1484 // See handleMADforNV50.
1485 if (insn->flagsDef >= 0)
1486 continue;
1487 } else {
1488 // We can only fold immediate arguments if dst == src2. This
1489 // only matters if one of the first two arguments is an
1490 // immediate. This form is also only supported for floats.
1491 // See handleMADforNVC0.
1492 ImmediateValue imm;
1493 if (insn->dType != TYPE_F32)
1494 continue;
1495 if (!insn->src(0).getImmediate(imm) &&
1496 !insn->src(1).getImmediate(imm))
1497 continue;
1498 }
1499
1500 nodes[i].addRegPreference(getNode(insn->getSrc(2)->asLValue()));
1501 }
1502 }
1503 }
1504
1505 // coalesce first, we use only 1 RIG node for a group of joined values
1506 ret = coalesce(insns);
1507 if (!ret)
1508 goto out;
1509
1510 if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1511 func->printLiveIntervals();
1512
1513 buildRIG(insns);
1514 calculateSpillWeights();
1515 ret = simplify();
1516 if (!ret)
1517 goto out;
1518
1519 ret = selectRegisters();
1520 if (!ret) {
1521 INFO_DBG(prog->dbgFlags, REG_ALLOC,
1522 "selectRegisters failed, inserting spill code ...\n");
1523 regs.reset(FILE_GPR, true);
1524 spill.run(mustSpill);
1525 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1526 func->print();
1527 } else {
1528 prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
1529 }
1530
1531 out:
1532 cleanup(ret);
1533 return ret;
1534 }
1535
1536 void
1537 GCRA::cleanup(const bool success)
1538 {
1539 mustSpill.clear();
1540
1541 for (ArrayList::Iterator it = func->allLValues.iterator();
1542 !it.end(); it.next()) {
1543 LValue *lval = reinterpret_cast<LValue *>(it.get());
1544
1545 lval->livei.clear();
1546
1547 lval->compound = 0;
1548 lval->compMask = 0;
1549
1550 if (lval->join == lval)
1551 continue;
1552
1553 if (success) {
1554 lval->reg.data.id = lval->join->reg.data.id;
1555 } else {
1556 for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
1557 ++d)
1558 lval->join->defs.remove(*d);
1559 lval->join = lval;
1560 }
1561 }
1562
1563 if (success)
1564 resolveSplitsAndMerges();
1565 splits.clear(); // avoid duplicate entries on next coalesce pass
1566 merges.clear();
1567
1568 delete[] nodes;
1569 nodes = NULL;
1570 hi.next = hi.prev = &hi;
1571 lo[0].next = lo[0].prev = &lo[0];
1572 lo[1].next = lo[1].prev = &lo[1];
1573 }
1574
1575 Symbol *
1576 SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
1577 {
1578 SpillSlot slot;
1579 int32_t offsetBase = stackSize;
1580 int32_t offset;
1581 std::list<SpillSlot>::iterator pos = slots.end(), it = slots.begin();
1582
1583 if (offsetBase % size)
1584 offsetBase += size - (offsetBase % size);
1585
1586 slot.sym = NULL;
1587
1588 for (offset = offsetBase; offset < stackSize; offset += size) {
1589 const int32_t entryEnd = offset + size;
1590 while (it != slots.end() && it->offset < offset)
1591 ++it;
1592 if (it == slots.end()) // no slots left
1593 break;
1594 std::list<SpillSlot>::iterator bgn = it;
1595
1596 while (it != slots.end() && it->offset < entryEnd) {
1597 it->occup.print();
1598 if (it->occup.overlaps(livei))
1599 break;
1600 ++it;
1601 }
1602 if (it == slots.end() || it->offset >= entryEnd) {
1603 // fits
1604 for (; bgn != slots.end() && bgn->offset < entryEnd; ++bgn) {
1605 bgn->occup.insert(livei);
1606 if (bgn->size() == size)
1607 slot.sym = bgn->sym;
1608 }
1609 break;
1610 }
1611 }
1612 if (!slot.sym) {
1613 stackSize = offset + size;
1614 slot.offset = offset;
1615 slot.sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
1616 if (!func->stackPtr)
1617 offset += func->tlsBase;
1618 slot.sym->setAddress(NULL, offset);
1619 slot.sym->reg.size = size;
1620 slots.insert(pos, slot)->occup.insert(livei);
1621 }
1622 return slot.sym;
1623 }
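// A worked example of the slot packing above (hypothetical state): with
// stackSize == 4 and size == 8, offsetBase is aligned up to 8; that is at
// or past stackSize, so the reuse scan finds nothing, a fresh slot at
// offset 8 is created, and stackSize grows to 16.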
1624
1625 Value *
1626 SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
1627 {
1628 if (!lval->compound || (lval->compMask & 0x1))
1629 return base;
1630 Value *slot = cloneShallow(func, base);
1631
1632 slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
1633 slot->reg.size = lval->reg.size;
1634
1635 return slot;
1636 }
1637
1638 void
1639 SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
1640 {
1641 const DataType ty = typeOfSize(lval->reg.size);
1642
1643 slot = offsetSlot(slot, lval);
1644
1645 Instruction *st;
1646 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1647 lval->noSpill = 1;
1648 if (ty != TYPE_B96) {
1649 st = new_Instruction(func, OP_STORE, ty);
1650 st->setSrc(0, slot);
1651 st->setSrc(1, lval);
1652 } else {
1653 st = new_Instruction(func, OP_SPLIT, ty);
1654 st->setSrc(0, lval);
1655 for (int d = 0; d < lval->reg.size / 4; ++d)
1656 st->setDef(d, new_LValue(func, FILE_GPR));
1657
1658 for (int d = lval->reg.size / 4 - 1; d >= 0; --d) {
1659 Value *tmp = cloneShallow(func, slot);
1660 tmp->reg.size = 4;
1661 tmp->reg.data.offset += 4 * d;
1662
1663 Instruction *s = new_Instruction(func, OP_STORE, TYPE_U32);
1664 s->setSrc(0, tmp);
1665 s->setSrc(1, st->getDef(d));
1666 defi->bb->insertAfter(defi, s);
1667 }
1668 }
1669 } else {
1670 st = new_Instruction(func, OP_CVT, ty);
1671 st->setDef(0, slot);
1672 st->setSrc(0, lval);
1673 if (lval->reg.file == FILE_FLAGS)
1674 st->flagsSrc = 0;
1675 }
1676 defi->bb->insertAfter(defi, st);
1677 }
1678
1679 LValue *
1680 SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
1681 {
1682 const DataType ty = typeOfSize(lval->reg.size);
1683
1684 slot = offsetSlot(slot, lval);
1685 lval = cloneShallow(func, lval);
1686
1687 Instruction *ld;
1688 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1689 lval->noSpill = 1;
1690 if (ty != TYPE_B96) {
1691 ld = new_Instruction(func, OP_LOAD, ty);
1692 } else {
1693 ld = new_Instruction(func, OP_MERGE, ty);
1694 for (int d = 0; d < lval->reg.size / 4; ++d) {
1695 Value *tmp = cloneShallow(func, slot);
1696 LValue *val;
1697 tmp->reg.size = 4;
1698 tmp->reg.data.offset += 4 * d;
1699
1700 Instruction *l = new_Instruction(func, OP_LOAD, TYPE_U32);
1701 l->setDef(0, (val = new_LValue(func, FILE_GPR)));
1702 l->setSrc(0, tmp);
1703 usei->bb->insertBefore(usei, l);
1704 ld->setSrc(d, val);
1705 val->noSpill = 1;
1706 }
1707 ld->setDef(0, lval);
1708 usei->bb->insertBefore(usei, ld);
1709 return lval;
1710 }
1711 } else {
1712 ld = new_Instruction(func, OP_CVT, ty);
1713 }
1714 ld->setDef(0, lval);
1715 ld->setSrc(0, slot);
1716 if (lval->reg.file == FILE_FLAGS)
1717 ld->flagsDef = 0;
1718
1719 usei->bb->insertBefore(usei, ld);
1720 return lval;
1721 }
1722
1723 static bool
1724 value_cmp(ValueRef *a, ValueRef *b) {
1725 Instruction *ai = a->getInsn(), *bi = b->getInsn();
1726 if (ai->bb != bi->bb)
1727 return ai->bb->getId() < bi->bb->getId();
1728 return ai->serial < bi->serial;
1729 }
1730
1731 // For each value that is to be spilled, go through all its definitions.
1732 // A value can have multiple definitions if it has been coalesced before.
1733 // For each definition, first go through all its uses and insert an unspill
1734 // instruction before it, then replace the use with the temporary register.
1735 // Unspill can be either a load from memory or simply a move to another
1736 // register file.
1737 // For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
1738 // if we have spilled to a memory location, or simply replace it with the
1739 // new register. No load or conversion instruction should be needed.
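// A sketch of the rewrite for one spilled value %v with a local memory slot
// (hypothetical instructions):
//   def:  add %v %a %b  ->  add %v %a %b  +  st l[slot] %v    (spill)
//   use:  mul %x %v %c  ->  ld %t l[slot]  +  mul %x %t %c    (unspill)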
1740 bool
1741 SpillCodeInserter::run(const std::list<ValuePair>& lst)
1742 {
1743 for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
1744 ++it) {
1745 LValue *lval = it->first->asLValue();
1746 Symbol *mem = it->second ? it->second->asSym() : NULL;
1747
1748 // Keep track of which instructions to delete later. Deleting them
1749 // inside the loop is unsafe since a single instruction may have
1750 // multiple destinations that all need to be spilled (like OP_SPLIT).
1751 unordered_set<Instruction *> to_del;
1752
1753 for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
1754 ++d) {
1755 Value *slot = mem ?
1756 static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
1757 Value *tmp = NULL;
1758 Instruction *last = NULL;
1759
1760 LValue *dval = (*d)->get()->asLValue();
1761 Instruction *defi = (*d)->getInsn();
1762
1763 // Sort all the uses by BB/instruction so that we don't unspill
1764 // multiple times in a row, and also remove a source of
1765 // non-determinism.
1766 std::vector<ValueRef *> refs(dval->uses.begin(), dval->uses.end());
1767 std::sort(refs.begin(), refs.end(), value_cmp);
1768
1769 // Unspill at each use *before* inserting spill instructions,
1770 // we don't want to have the spill instructions in the use list here.
1771 for (std::vector<ValueRef*>::const_iterator it = refs.begin();
1772 it != refs.end(); ++it) {
1773 ValueRef *u = *it;
1774 Instruction *usei = u->getInsn();
1775 assert(usei);
1776 if (usei->isPseudo()) {
1777 tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
1778 last = NULL;
1779 } else {
1780 if (!last || (usei != last->next && usei != last))
1781 tmp = unspill(usei, dval, slot);
1782 last = usei;
1783 }
1784 u->set(tmp);
1785 }
1786
1787 assert(defi);
1788 if (defi->isPseudo()) {
1789 d = lval->defs.erase(d);
1790 --d;
1791 if (slot->reg.file == FILE_MEMORY_LOCAL)
1792 to_del.insert(defi);
1793 else
1794 defi->setDef(0, slot);
1795 } else {
1796 spill(defi, slot, dval);
1797 }
1798 }
1799
1800 for (unordered_set<Instruction *>::const_iterator it = to_del.begin();
1801 it != to_del.end(); ++it)
1802 delete_Instruction(func->getProgram(), *it);
1803 }
1804
1805 // TODO: We're not trying to reuse old slots in a potential next iteration.
1806 // We have to update the slots' livei intervals to be able to do that.
1807 stackBase = stackSize;
1808 slots.clear();
1809 return true;
1810 }
1811
1812 bool
1813 RegAlloc::exec()
1814 {
1815 for (IteratorRef it = prog->calls.iteratorDFS(false);
1816 !it->end(); it->next()) {
1817 func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));
1818
1819 func->tlsBase = prog->tlsSize;
1820 if (!execFunc())
1821 return false;
1822 prog->tlsSize += func->tlsSize;
1823 }
1824 return true;
1825 }
1826
1827 bool
1828 RegAlloc::execFunc()
1829 {
1830 InsertConstraintsPass insertConstr;
1831 PhiMovesPass insertPhiMoves;
1832 ArgumentMovesPass insertArgMoves;
1833 BuildIntervalsPass buildIntervals;
1834 SpillCodeInserter insertSpills(func);
1835
1836 GCRA gcra(func, insertSpills);
1837
1838 unsigned int i, retries;
1839 bool ret;
1840
1841 if (!func->ins.empty()) {
1842 // Insert a nop at the entry so inputs only used by the first instruction
1843 // don't count as having an empty live range.
1844 Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
1845 BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
1846 }
1847
1848 ret = insertConstr.exec(func);
1849 if (!ret)
1850 goto out;
1851
1852 ret = insertPhiMoves.run(func);
1853 if (!ret)
1854 goto out;
1855
1856 ret = insertArgMoves.run(func);
1857 if (!ret)
1858 goto out;
1859
1860 // TODO: need to fix up spill slot usage ranges to support > 1 retry
1861 for (retries = 0; retries < 3; ++retries) {
1862 if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
1863 INFO("Retry: %i\n", retries);
1864 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1865 func->print();
1866
1867 // spilling to registers may add live ranges, need to rebuild everything
1868 ret = true;
1869 for (sequence = func->cfg.nextSequence(), i = 0;
1870 ret && i <= func->loopNestingBound;
1871 sequence = func->cfg.nextSequence(), ++i)
1872 ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
1873 // reset marker
1874 for (ArrayList::Iterator bi = func->allBBlocks.iterator();
1875 !bi.end(); bi.next())
1876 BasicBlock::get(bi)->liveSet.marker = false;
1877 if (!ret)
1878 break;
1879 func->orderInstructions(this->insns);
1880
1881 ret = buildIntervals.run(func);
1882 if (!ret)
1883 break;
1884 ret = gcra.allocateRegisters(insns);
1885 if (ret)
1886 break; // success
1887 }
1888 INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);
1889
1890 func->tlsSize = insertSpills.getStackSize();
1891 out:
1892 return ret;
1893 }
1894
1895 // TODO: check if modifying Instruction::join here breaks anything
1896 void
1897 GCRA::resolveSplitsAndMerges()
1898 {
1899 for (std::list<Instruction *>::iterator it = splits.begin();
1900 it != splits.end();
1901 ++it) {
1902 Instruction *split = *it;
1903 unsigned int reg = regs.idToBytes(split->getSrc(0));
1904 for (int d = 0; split->defExists(d); ++d) {
1905 Value *v = split->getDef(d);
1906 v->reg.data.id = regs.bytesToId(v, reg);
1907 v->join = v;
1908 reg += v->reg.size;
1909 }
1910 }
1911 splits.clear();
1912
1913 for (std::list<Instruction *>::iterator it = merges.begin();
1914 it != merges.end();
1915 ++it) {
1916 Instruction *merge = *it;
1917 unsigned int reg = regs.idToBytes(merge->getDef(0));
1918 for (int s = 0; merge->srcExists(s); ++s) {
1919 Value *v = merge->getSrc(s);
1920 v->reg.data.id = regs.bytesToId(v, reg);
1921 v->join = v;
1922 // If the value is defined by a phi/union node, we also need to
1923 // perform the same fixup on that node's sources, since after RA
1924 // their registers should be identical.
1925 if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
1926 Instruction *phi = v->getInsn();
1927 for (int phis = 0; phi->srcExists(phis); ++phis) {
1928 phi->getSrc(phis)->join = v;
1929 phi->getSrc(phis)->reg.data.id = v->reg.data.id;
1930 }
1931 }
1932 reg += v->reg.size;
1933 }
1934 }
1935 merges.clear();
1936 }
1937
1938 bool Program::registerAllocation()
1939 {
1940 RegAlloc ra(this);
1941 return ra.exec();
1942 }
1943
1944 bool
1945 RegAlloc::InsertConstraintsPass::exec(Function *ir)
1946 {
1947 constrList.clear();
1948
1949 bool ret = run(ir, true, true);
1950 if (ret)
1951 ret = insertConstraintMoves();
1952 return ret;
1953 }
1954
// TODO: make part of texture insn
void
RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
{
   Value *def[4];
   int c, k, d;
   uint8_t mask = 0;

   for (d = 0, k = 0, c = 0; c < 4; ++c) {
      if (!(tex->tex.mask & (1 << c)))
         continue;
      if (tex->getDef(k)->refCount()) {
         mask |= 1 << c;
         def[d++] = tex->getDef(k);
      }
      ++k;
   }
   tex->tex.mask = mask;

   for (c = 0; c < d; ++c)
      tex->setDef(c, def[c]);
   for (; c < 4; ++c)
      tex->setDef(c, NULL);
}

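// Report whether source s of constraint cst conflicts with another
// constraint: the value is also used outside this constraint, appears more
// than once among its sources, or is defined by an instruction whose defs
// are themselves constrained.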
bool
RegAlloc::InsertConstraintsPass::detectConflict(Instruction *cst, int s)
{
   Value *v = cst->getSrc(s);

   // current register allocation can't handle it if a value participates in
   // multiple constraints
   for (Value::UseIterator it = v->uses.begin(); it != v->uses.end(); ++it) {
      if (cst != (*it)->getInsn())
         return true;
   }

   // can start at s + 1 because detectConflict is called on all sources
   for (int c = s + 1; cst->srcExists(c); ++c)
      if (v == cst->getSrc(c))
         return true;

   Instruction *defi = v->getInsn();

   return (!defi || defi->constrainedDefs());
}

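// Route sources [s, s + n) of i through an OP_CONSTRAINT instruction so that
// they can be assigned consecutive registers; a dominating constraint with
// identical sources is reused instead of emitting a new one.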
void
RegAlloc::InsertConstraintsPass::addConstraint(Instruction *i, int s, int n)
{
   Instruction *cst;
   int d;

   // first, look for an existing identical constraint op
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      cst = (*it);
      if (!i->bb->dominatedBy(cst->bb))
         break;
      for (d = 0; d < n; ++d)
         if (cst->getSrc(d) != i->getSrc(d + s))
            break;
      if (d >= n) {
         for (d = 0; d < n; ++d, ++s)
            i->setSrc(s, cst->getDef(d));
         return;
      }
   }
   cst = new_Instruction(func, OP_CONSTRAINT, i->dType);

   for (d = 0; d < n; ++s, ++d) {
      cst->setDef(d, new_LValue(func, FILE_GPR));
      cst->setSrc(d, i->getSrc(s));
      i->setSrc(s, cst->getDef(d));
   }
   i->bb->insertBefore(i, cst);

   constrList.push_back(cst);
}

// Add a dummy use of the pointer source of >= 8 byte loads after the load
// to prevent it from being assigned a register that overlaps the load's
// destination, which would produce random corruptions.
void
RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
{
   Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
   hzd->setSrc(0, src->get());
   i->bb->insertAfter(i, hzd);
}

// b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
{
   int n;
   for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n);
   condenseDefs(insn, 0, n - 1);
}

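// Partial variant: condense only defs [a, b] into a single compound value,
// e.g. b32 { %r1 %r2 } -> b64 %r1d, shifting any defs past b down to fill
// the gap.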
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getDef(s)->reg.size;
   if (!size)
      return;

   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
   split->setSrc(0, lval);
   for (int d = a; d <= b; ++d) {
      split->setDef(d - a, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   insn->setDef(a, lval);

   for (int k = a + 1, d = b + 1; insn->defExists(d); ++d, ++k) {
      insn->setDef(k, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   // carry over predicate if any (mainly for OP_UNION uses)
   split->setPredicate(insn->cc, insn->getPredicate());

   insn->bb->insertAfter(insn, split);
   constrList.push_back(split);
}

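// Counterpart for sources: replace srcs [a, b] of insn with one compound
// value, e.g. b32 { %r0 %r1 } -> b64 %r0d, built by an OP_MERGE inserted
// right before insn.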
void
RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getSrc(s)->reg.size;
   if (!size)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Value *save[3];
   insn->takeExtraSources(0, save);

   Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
   merge->setDef(0, lval);
   for (int s = a, i = 0; s <= b; ++s, ++i) {
      merge->setSrc(i, insn->getSrc(s));
   }
   insn->moveSources(b + 1, a - b);
   insn->setSrc(a, lval);
   insn->bb->insertBefore(insn, merge);

   insn->putExtraSources(0, save);

   constrList.push_back(merge);
}

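// GM107 variant: coordinates and, where present, the data/atomic values are
// grouped into separate compound sources, while the handle stays on its own.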
void
RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (isSurfaceOp(tex->op)) {
      int s = tex->tex.target.getDim() +
         (tex->tex.target.isArray() || tex->tex.target.isCube());
      int n = 0;

      switch (tex->op) {
      case OP_SUSTB:
      case OP_SUSTP:
         n = 4;
         break;
      case OP_SUREDB:
      case OP_SUREDP:
         if (tex->subOp == NV50_IR_SUBOP_ATOM_CAS)
            n = 2;
         break;
      default:
         break;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1)
         condenseSrcs(tex, 1, n); // do not condense the tex handle
   } else
   if (isTextureOp(tex->op)) {
      if (tex->op != OP_TXQ) {
         s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
         if (tex->op == OP_TXD) {
            // Indirect handle belongs in the first arg
            if (tex->tex.rIndirectSrc >= 0)
               s++;
            if (!tex->tex.target.isArray() && tex->tex.useOffsets)
               s++;
         }
         n = tex->srcCount(0xff) - s;
      } else {
         s = tex->srcCount(0xff);
         n = 0;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1) // NOTE: first call modified positions already
         condenseSrcs(tex, 1, n);
   }
}

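// NVE0 variant: at most four sources go into the first compound operand;
// any remaining ones are merged into a second.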
void
RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
{
   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
      condenseSrcs(tex, 3, 6);
   } else
   if (isTextureOp(tex->op)) {
      int n = tex->srcCount(0xff, true);
      if (n > 4) {
         condenseSrcs(tex, 0, 3);
         if (n > 5) // NOTE: first call modified positions already
            condenseSrcs(tex, 4 - (4 - 1), n - 1 - (4 - 1));
      } else
      if (n > 1) {
         condenseSrcs(tex, 0, n - 1);
      }
   }
}

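// NVC0 variant: work out how many sources form the coordinate/argument part
// (s) and how many form the data part (n), then condense each group.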
void
RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

   if (tex->op == OP_TXQ) {
      s = tex->srcCount(0xff);
      n = 0;
   } else if (isSurfaceOp(tex->op)) {
      s = tex->tex.target.getDim() +
         (tex->tex.target.isArray() || tex->tex.target.isCube());
      if (tex->op == OP_SUSTB || tex->op == OP_SUSTP)
         n = 4;
      else
         n = 0;
   } else {
      s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
      if (!tex->tex.target.isArray() &&
          (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
         ++s;
      if (tex->op == OP_TXD && tex->tex.useOffsets)
         ++s;
      n = tex->srcCount(0xff) - s;
      assert(n <= 4);
   }

   if (s > 1)
      condenseSrcs(tex, 0, s - 1);
   if (n > 1) // NOTE: first call modified positions already
      condenseSrcs(tex, 1, n);

   condenseDefs(tex);
}

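// NV50 tex instructions read their arguments from and write their results to
// the same consecutive registers, so pad srcs and defs to a common count and
// condense both sides.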
void
RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
{
   Value *pred = tex->getPredicate();
   if (pred)
      tex->setPredicate(tex->cc, NULL);

   textureMask(tex);

   assert(tex->defExists(0) && tex->srcExists(0));
   // make src and def count match
   int c;
   for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
      if (!tex->srcExists(c))
         tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
      else
         insertConstraintMove(tex, c);
      if (!tex->defExists(c))
         tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
   }
   if (pred)
      tex->setPredicate(tex->cc, pred);
   condenseDefs(tex);
   condenseSrcs(tex, 0, c - 1);
}

// Insert constraint markers for instructions whose multiple sources must be
// located in consecutive registers.
bool
RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
{
   TexInstruction *tex;
   Instruction *next;
   int s, size;

   targ = bb->getProgram()->getTarget();

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if ((tex = i->asTex())) {
         switch (targ->getChipset() & ~0xf) {
         case 0x50:
         case 0x80:
         case 0x90:
         case 0xa0:
            texConstraintNV50(tex);
            break;
         case 0xc0:
         case 0xd0:
            texConstraintNVC0(tex);
            break;
         case 0xe0:
         case 0xf0:
         case 0x100:
            texConstraintNVE0(tex);
            break;
         case 0x110:
         case 0x120:
         case 0x130:
            texConstraintGM107(tex);
            break;
         default:
            break;
         }
      } else
      if (i->op == OP_EXPORT || i->op == OP_STORE) {
         for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
            assert(i->srcExists(s));
            size -= i->getSrc(s)->reg.size;
         }
         condenseSrcs(i, 1, s - 1);
      } else
      if (i->op == OP_LOAD || i->op == OP_VFETCH) {
         condenseDefs(i);
         if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(0));
         if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(1));
      } else
      if (i->op == OP_UNION ||
          i->op == OP_MERGE ||
          i->op == OP_SPLIT) {
         constrList.push_back(i);
      }
   }
   return true;
}

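// Resolve a conflict on source s of a constraint by copying the value into a
// fresh LValue; immediates and non-indirect constant-buffer loads are instead
// rematerialized or moved next to the constraint rather than having their
// live range extended.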
void
RegAlloc::InsertConstraintsPass::insertConstraintMove(Instruction *cst, int s)
{
   const uint8_t size = cst->src(s).getSize();

   assert(cst->getSrc(s)->defs.size() == 1); // still SSA

   Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();
   bool imm = defi->op == OP_MOV &&
      defi->src(0).getFile() == FILE_IMMEDIATE;
   bool load = defi->op == OP_LOAD &&
      defi->src(0).getFile() == FILE_MEMORY_CONST &&
      !defi->src(0).isIndirect(0);
   // catch some cases where we don't really need MOVs
   if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs()) {
      if (imm || load) {
         // Move the defi right before the cst. No point in expanding
         // the range.
         defi->bb->remove(defi);
         cst->bb->insertBefore(cst, defi);
      }
      return;
   }

   LValue *lval = new_LValue(func, cst->src(s).getFile());
   lval->reg.size = size;

   Instruction *mov = new_Instruction(func, OP_MOV, typeOfSize(size));
   mov->setDef(0, lval);
   mov->setSrc(0, cst->getSrc(s));

   if (load) {
      mov->op = OP_LOAD;
      mov->setSrc(0, defi->getSrc(0));
   } else if (imm) {
      mov->setSrc(0, defi->getSrc(0));
   }

   if (defi->getPredicate())
      mov->setPredicate(defi->cc, defi->getPredicate());

   cst->setSrc(s, mov->getDef(0));
   cst->bb->insertBefore(cst, mov);

   cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help
}

// Insert extra moves so that, if multiple register constraints on a value are
// in conflict, these conflicts can be resolved.
bool
RegAlloc::InsertConstraintsPass::insertConstraintMoves()
{
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      Instruction *cst = *it;
      Instruction *mov;

      if (cst->op == OP_SPLIT && 0) {
         // spilling splits is annoying, just make sure they're separate
         for (int d = 0; cst->defExists(d); ++d) {
            if (!cst->getDef(d)->refCount())
               continue;
            LValue *lval = new_LValue(func, cst->def(d).getFile());
            const uint8_t size = cst->def(d).getSize();
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setSrc(0, lval);
            mov->setDef(0, cst->getDef(d));
            cst->setDef(d, mov->getSrc(0));
            cst->bb->insertAfter(cst, mov);

            cst->getSrc(0)->asLValue()->noSpill = 1;
            mov->getSrc(0)->asLValue()->noSpill = 1;
         }
      } else
      if (cst->op == OP_MERGE || cst->op == OP_UNION) {
         for (int s = 0; cst->srcExists(s); ++s) {
            const uint8_t size = cst->src(s).getSize();

            if (!cst->getSrc(s)->defs.size()) {
               mov = new_Instruction(func, OP_NOP, typeOfSize(size));
               mov->setDef(0, cst->getSrc(s));
               cst->bb->insertBefore(cst, mov);
               continue;
            }

            insertConstraintMove(cst, s);
         }
      }
   }

   return true;
}

} // namespace nv50_ir