/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"

#include <algorithm>
#include <stack>
#include <limits>
#if __cplusplus >= 201103L
#include <unordered_map>
#include <unordered_set>
#else
#include <tr1/unordered_map>
#include <tr1/unordered_set>
#endif

namespace nv50_ir {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
using std::unordered_set;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
using std::tr1::unordered_set;
#endif

#define MAX_REGISTER_FILE_SIZE 256

class RegisterSet
{
public:
   RegisterSet(const Target *);

   void init(const Target *);
   void reset(DataFile, bool resetMax = false);

   void periodicMask(DataFile f, uint32_t lock, uint32_t unlock);
   void intersect(DataFile f, const RegisterSet *);

   bool assign(int32_t& reg, DataFile f, unsigned int size, unsigned int maxReg);
   void release(DataFile f, int32_t reg, unsigned int size);
   void occupy(DataFile f, int32_t reg, unsigned int size);
   void occupy(const Value *);
   void occupyMask(DataFile f, int32_t reg, uint8_t mask);
   bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
   bool testOccupy(const Value *);
   bool testOccupy(DataFile f, int32_t reg, unsigned int size);

   inline int getMaxAssigned(DataFile f) const { return fill[f]; }

   inline unsigned int getFileSize(DataFile f) const
   {
      return last[f] + 1;
   }

   inline unsigned int units(DataFile f, unsigned int size) const
   {
      return size >> unit[f];
   }
   // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
   inline unsigned int idToBytes(const Value *v) const
   {
      return v->reg.data.id * MIN2(v->reg.size, 4);
   }
   inline unsigned int idToUnits(const Value *v) const
   {
      return units(v->reg.file, idToBytes(v));
   }
   inline int bytesToId(Value *v, unsigned int bytes) const
   {
      if (v->reg.size < 4)
         return units(v->reg.file, bytes);
      return bytes / 4;
   }
   inline int unitsToId(DataFile f, int u, uint8_t size) const
   {
      if (u < 0)
         return -1;
      return (size < 4) ? u : ((u << unit[f]) / 4);
   }
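
   // Worked example, assuming a register file with a 4-byte allocation unit
   // (unit[f] == 2): a 64-bit value with reg.data.id == 2 ($r2d) gives
   // idToBytes() = 2 * 4 = 8, idToUnits() = 8 >> 2 = 2, and
   // bytesToId(v, 8) = 8 / 4 = 2 again. Values smaller than 4 bytes are
   // counted in allocation units instead of 4-byte words.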

   void print(DataFile f) const;

   const bool restrictedGPR16Range;

private:
   BitSet bits[LAST_REGISTER_FILE + 1];

   int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity

   int last[LAST_REGISTER_FILE + 1];
   int fill[LAST_REGISTER_FILE + 1];
};

void
RegisterSet::reset(DataFile f, bool resetMax)
{
   bits[f].fill(0);
   if (resetMax)
      fill[f] = -1;
}

void
RegisterSet::init(const Target *targ)
{
   for (unsigned int rf = 0; rf <= FILE_ADDRESS; ++rf) {
      DataFile f = static_cast<DataFile>(rf);
      last[rf] = targ->getFileSize(f) - 1;
      unit[rf] = targ->getFileUnit(f);
      fill[rf] = -1;
      assert(last[rf] < MAX_REGISTER_FILE_SIZE);
      bits[rf].allocate(last[rf] + 1, true);
   }
}

RegisterSet::RegisterSet(const Target *targ)
  : restrictedGPR16Range(targ->getChipset() < 0xc0)
{
   init(targ);
   for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
      reset(static_cast<DataFile>(i));
}

void
RegisterSet::periodicMask(DataFile f, uint32_t lock, uint32_t unlock)
{
   bits[f].periodicMask32(lock, unlock);
}

void
RegisterSet::intersect(DataFile f, const RegisterSet *set)
{
   bits[f] |= set->bits[f];
}

void
RegisterSet::print(DataFile f) const
{
   INFO("GPR:");
   bits[f].print();
   INFO("\n");
}

bool
RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size, unsigned int maxReg)
{
   reg = bits[f].findFreeRange(size, maxReg);
   if (reg < 0)
      return false;
   fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
   return true;
}

bool
RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
{
   return bits[f].testRange(reg, size);
}

void
RegisterSet::occupy(const Value *v)
{
   occupy(v->reg.file, idToUnits(v), v->reg.size >> unit[v->reg.file]);
}

void
RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
{
   bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
}

void
RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].setRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);

   fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
}

bool
RegisterSet::testOccupy(const Value *v)
{
   return testOccupy(v->reg.file,
                     idToUnits(v), v->reg.size >> unit[v->reg.file]);
}

bool
RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
{
   if (isOccupied(f, reg, size))
      return false;
   occupy(f, reg, size);
   return true;
}

void
RegisterSet::release(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].clrRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg release: %u[%i] %u\n", f, reg, size);
}

class RegAlloc
{
public:
   RegAlloc(Program *program) : prog(program), sequence(0) { }

   bool exec();
   bool execFunc();

private:
   class PhiMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
      inline void splitEdges(BasicBlock *b);
   };

   class ArgumentMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
   };

   class BuildIntervalsPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      void collectLiveValues(BasicBlock *);
      void addLiveRange(Value *, const BasicBlock *, int end);
   };

   class InsertConstraintsPass : public Pass {
   public:
      bool exec(Function *func);
   private:
      virtual bool visit(BasicBlock *);

      void insertConstraintMove(Instruction *, int s);
      bool insertConstraintMoves();

      void condenseDefs(Instruction *);
      void condenseDefs(Instruction *, const int first, const int last);
      void condenseSrcs(Instruction *, const int first, const int last);

      void addHazard(Instruction *i, const ValueRef *src);
      void textureMask(TexInstruction *);
      void addConstraint(Instruction *, int s, int n);
      bool detectConflict(Instruction *, int s);

      // target specific functions, TODO: put in subclass or Target
      void texConstraintNV50(TexInstruction *);
      void texConstraintNVC0(TexInstruction *);
      void texConstraintNVE0(TexInstruction *);
      void texConstraintGM107(TexInstruction *);

      bool isScalarTexGM107(TexInstruction *);
      void handleScalarTexGM107(TexInstruction *);

      std::list<Instruction *> constrList;

      const Target *targ;
   };

   bool buildLiveSets(BasicBlock *);

private:
   Program *prog;
   Function *func;

   // instructions in control flow / chronological order
   ArrayList insns;

   int sequence; // for manual passes through CFG
};

typedef std::pair<Value *, Value *> ValuePair;

class SpillCodeInserter
{
public:
   SpillCodeInserter(Function *fn) : func(fn), stackSize(0), stackBase(0) { }

   bool run(const std::list<ValuePair>&);

   Symbol *assignSlot(const Interval&, const unsigned int size);
   Value *offsetSlot(Value *, const LValue *);
   inline int32_t getStackSize() const { return stackSize; }

private:
   Function *func;

   struct SpillSlot
   {
      Interval occup;
      std::list<Value *> residents; // needed to recalculate occup
      Symbol *sym;
      int32_t offset;
      inline uint8_t size() const { return sym->reg.size; }
   };
   std::list<SpillSlot> slots;
   int32_t stackSize;
   int32_t stackBase;

   LValue *unspill(Instruction *usei, LValue *, Value *slot);
   void spill(Instruction *defi, Value *slot, LValue *);
};

void
RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
                                           const BasicBlock *bb,
                                           int end)
{
   Instruction *insn = val->getUniqueInsn();

   if (!insn)
      insn = bb->getFirst();

   assert(bb->getFirst()->serial <= bb->getExit()->serial);
   assert(bb->getExit()->serial + 1 >= end);

   int begin = insn->serial;
   if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
      begin = bb->getEntry()->serial;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
            val->id, begin, insn->serial, end);

   if (begin != end) // empty ranges are only added as hazards for fixed regs
      val->livei.extend(begin, end);
}

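// Detect a critical edge from p to b: b has multiple predecessors and p has
// exactly two (tree/forward) successors, so anything inserted "along the
// edge" would need a new block to avoid executing on the other path.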
bool
RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
{
   if (b->cfg.incidentCount() <= 1)
      return false;

   int n = 0;
   for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
      if (ei.getType() == Graph::Edge::TREE ||
          ei.getType() == Graph::Edge::FORWARD)
         ++n;
   return (n == 2);
}

struct PhiMapHash {
   size_t operator()(const std::pair<Instruction *, BasicBlock *>& val) const {
      return hash<Instruction*>()(val.first) * 31 +
         hash<BasicBlock*>()(val.second);
   }
};

typedef unordered_map<
   std::pair<Instruction *, BasicBlock *>, Value *, PhiMapHash> PhiMap;

// Critical edges need to be split up so that work can be inserted along
// specific edge transitions. Unfortunately manipulating incident edges into a
// BB invalidates all the PHI nodes since their sources are implicitly ordered
// by incident edge order.
//
// TODO: Make it so that that is not the case, and PHI nodes store pointers to
// the original BBs.
void
RegAlloc::PhiMovesPass::splitEdges(BasicBlock *bb)
{
   BasicBlock *pb, *pn;
   Instruction *phi;
   Graph::EdgeIterator ei;
   std::stack<BasicBlock *> stack;
   int j = 0;

   for (ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      pb = BasicBlock::get(ei.getNode());
      assert(pb);
      if (needNewElseBlock(bb, pb))
         stack.push(pb);
   }

   // No critical edges were found, no need to perform any work.
   if (stack.empty())
      return;

   // We're about to, potentially, reorder the inbound edges. This means that
   // we need to hold on to the (phi, bb) -> src mapping, and fix up the phi
   // nodes after the graph has been modified.
   PhiMap phis;

   j = 0;
   for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
      pb = BasicBlock::get(ei.getNode());
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next)
         phis.insert(std::make_pair(std::make_pair(phi, pb), phi->getSrc(j)));
   }

   while (!stack.empty()) {
      pb = stack.top();
      pn = new BasicBlock(func);
      stack.pop();

      pb->cfg.detach(&bb->cfg);
      pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
      pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);

      assert(pb->getExit()->op != OP_CALL);
      if (pb->getExit()->asFlow()->target.bb == bb)
         pb->getExit()->asFlow()->target.bb = pn;

      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         PhiMap::iterator it = phis.find(std::make_pair(phi, pb));
         assert(it != phis.end());
         phis.insert(std::make_pair(std::make_pair(phi, pn), it->second));
         phis.erase(it);
      }
   }

   // Now go through and fix up all of the phi node sources.
   j = 0;
   for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
      pb = BasicBlock::get(ei.getNode());
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         PhiMap::const_iterator it = phis.find(std::make_pair(phi, pb));
         assert(it != phis.end());

         phi->setSrc(j, it->second);
      }
   }
}

// For each operand of each PHI in b, generate a new value by inserting a MOV
// at the end of the block it is coming from and replace the operand with its
// result. This eliminates liveness conflicts and enables us to let values be
// copied to the right register if such a conflict exists nonetheless.
//
// These MOVs are also crucial in making sure the live intervals of phi
// sources are extended until the end of the loop, since they are not
// included in the live-in sets.
bool
RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
{
   Instruction *phi, *mov;

   splitEdges(bb);

   // insert MOVs (phi->src(j) should stem from j-th in-BB)
   int j = 0;
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      BasicBlock *pb = BasicBlock::get(ei.getNode());
      if (!pb->isTerminated())
         pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));

      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
         mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));

         mov->setSrc(0, phi->getSrc(j));
         mov->setDef(0, tmp);
         phi->setSrc(j, tmp);

         pb->insertBefore(pb->getExit(), mov);
      }
      ++j;
   }

   return true;
}

bool
RegAlloc::ArgumentMovesPass::visit(BasicBlock *bb)
{
   // Bind function call inputs/outputs to the same physical register
   // the callee uses, inserting moves as appropriate for the case a
   // conflict arises.
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      FlowInstruction *cal = i->asFlow();
      // TODO: Handle indirect calls.
      // Right now they should only be generated for builtins.
      if (!cal || cal->op != OP_CALL || cal->builtin || cal->indirect)
         continue;
      RegisterSet clobberSet(prog->getTarget());

      // Bind input values.
      for (int s = cal->indirect ? 1 : 0; cal->srcExists(s); ++s) {
         const int t = cal->indirect ? (s - 1) : s;
         LValue *tmp = new_LValue(func, cal->getSrc(s)->asLValue());
         tmp->reg.data.id = cal->target.fn->ins[t].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setDef(0, tmp);
         mov->setSrc(0, cal->getSrc(s));
         cal->setSrc(s, tmp);

         bb->insertBefore(cal, mov);
      }

      // Bind output values.
      for (int d = 0; cal->defExists(d); ++d) {
         LValue *tmp = new_LValue(func, cal->getDef(d)->asLValue());
         tmp->reg.data.id = cal->target.fn->outs[d].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setSrc(0, tmp);
         mov->setDef(0, cal->getDef(d));
         cal->setDef(d, tmp);

         bb->insertAfter(cal, mov);
         clobberSet.occupy(tmp);
      }

      // Bind clobbered values.
      for (std::deque<Value *>::iterator it = cal->target.fn->clobbers.begin();
           it != cal->target.fn->clobbers.end();
           ++it) {
         if (clobberSet.testOccupy(*it)) {
            Value *tmp = new_LValue(func, (*it)->asLValue());
            tmp->reg.data.id = (*it)->reg.data.id;
            cal->setDef(cal->defCount(), tmp);
         }
      }
   }

   // Update the clobber set of the function.
   if (BasicBlock::get(func->cfgExit) == bb) {
      func->buildDefSets();
      for (unsigned int i = 0; i < bb->defSet.getSize(); ++i)
         if (bb->defSet.test(i))
            func->clobbers.push_back(func->getLValue(i));
   }

   return true;
}

// Build the set of live-in variables of bb.
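// This recurses depth-first over the CFG (using the visit sequence to avoid
// cycles), ORs together the live sets of all successor blocks, and then walks
// the block bottom-up, clearing values at their definitions and setting them
// at their uses.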
bool
RegAlloc::buildLiveSets(BasicBlock *bb)
{
   Function *f = bb->getFunction();
   BasicBlock *bn;
   Instruction *i;
   unsigned int s, d;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());

   bb->liveSet.allocate(func->allLValues.getSize(), false);

   int n = 0;
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      bn = BasicBlock::get(ei.getNode());
      if (bn == bb)
         continue;
      if (bn->cfg.visit(sequence))
         if (!buildLiveSets(bn))
            return false;
      if (n++ || bb->liveSet.marker)
         bb->liveSet |= bn->liveSet;
      else
         bb->liveSet = bn->liveSet;
   }
   if (!n && !bb->liveSet.marker)
      bb->liveSet.fill(0);
   bb->liveSet.marker = true;

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set of out blocks:\n", bb->getId());
      bb->liveSet.print();
   }

   // if (!bb->getEntry())
   //   return true;

   if (bb == BasicBlock::get(f->cfgExit)) {
      for (std::deque<ValueRef>::iterator it = f->outs.begin();
           it != f->outs.end(); ++it) {
         assert(it->get()->asLValue());
         bb->liveSet.set(it->get()->id);
      }
   }

   for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
      for (d = 0; i->defExists(d); ++d)
         bb->liveSet.clr(i->getDef(d)->id);
      for (s = 0; i->srcExists(s); ++s)
         if (i->getSrc(s)->asLValue())
            bb->liveSet.set(i->getSrc(s)->id);
   }
   for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
      bb->liveSet.clr(i->getDef(0)->id);

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set after propagation:\n", bb->getId());
      bb->liveSet.print();
   }

   return true;
}

void
RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
{
   BasicBlock *bbA = NULL, *bbB = NULL;

   if (bb->cfg.outgoingCount()) {
      // trickery to save a loop of OR'ing liveSets
      // aliasing works fine with BitSet::setOr
      for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
         if (bbA) {
            bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
            bbA = bb;
         } else {
            bbA = bbB;
         }
         bbB = BasicBlock::get(ei.getNode());
      }
      bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
   } else
   if (bb->cfg.incidentCount()) {
      bb->liveSet.fill(0);
   }
}

bool
RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
{
   collectLiveValues(bb);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());

   // go through out blocks and delete phi sources that do not originate from
   // the current block from the live set
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      BasicBlock *out = BasicBlock::get(ei.getNode());

      for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
         bb->liveSet.clr(i->getDef(0)->id);

         for (int s = 0; i->srcExists(s); ++s) {
            assert(i->src(s).getInsn());
            if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
               bb->liveSet.set(i->getSrc(s)->id);
            else
               bb->liveSet.clr(i->getSrc(s)->id);
         }
      }
   }

   // remaining live-outs are live until end
   if (bb->getExit()) {
      for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
         if (bb->liveSet.test(j))
            addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
   }

   for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
      for (int d = 0; i->defExists(d); ++d) {
         bb->liveSet.clr(i->getDef(d)->id);
         if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
            i->getDef(d)->livei.extend(i->serial, i->serial);
      }

      for (int s = 0; i->srcExists(s); ++s) {
         if (!i->getSrc(s)->asLValue())
            continue;
         if (!bb->liveSet.test(i->getSrc(s)->id)) {
            bb->liveSet.set(i->getSrc(s)->id);
            addLiveRange(i->getSrc(s), bb, i->serial);
         }
      }
   }

   if (bb == BasicBlock::get(func->cfg.getRoot())) {
      for (std::deque<ValueDef>::iterator it = func->ins.begin();
           it != func->ins.end(); ++it) {
         if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
            it->get()->livei.extend(0, 1);
      }
   }

   return true;
}


#define JOIN_MASK_PHI        (1 << 0)
#define JOIN_MASK_UNION      (1 << 1)
#define JOIN_MASK_MOV        (1 << 2)
#define JOIN_MASK_TEX        (1 << 3)

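// Graph-colouring register allocator: values are coalesced first, then a
// register interference graph (RIG) is built from the overlapping live
// intervals. simplify() removes nodes onto a stack, selectRegisters() pops
// them off again and assigns each a register not used by its already-coloured
// neighbours. Nodes that cannot be coloured are handed to the
// SpillCodeInserter, and the caller retries allocation.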
class GCRA
{
public:
   GCRA(Function *, SpillCodeInserter&);
   ~GCRA();

   bool allocateRegisters(ArrayList& insns);

   void printNodeInfo() const;

private:
   class RIG_Node : public Graph::Node
   {
   public:
      RIG_Node();

      void init(const RegisterSet&, LValue *);

      void addInterference(RIG_Node *);
      void addRegPreference(RIG_Node *);

      inline LValue *getValue() const
      {
         return reinterpret_cast<LValue *>(data);
      }
      inline void setValue(LValue *lval) { data = lval; }

      inline uint8_t getCompMask() const
      {
         return ((1 << colors) - 1) << (reg & 7);
      }

      static inline RIG_Node *get(const Graph::EdgeIterator& ei)
      {
         return static_cast<RIG_Node *>(ei.getNode());
      }

   public:
      uint32_t degree;
      uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
      uint16_t maxReg;
      uint16_t colors;

      DataFile f;
      int32_t reg;

      float weight;

      // list pointers for simplify() phase
      RIG_Node *next;
      RIG_Node *prev;

      // union of the live intervals of all coalesced values (we want to retain
      // the separate intervals for testing interference of compound values)
      Interval livei;

      std::list<RIG_Node *> prefRegs;
   };

private:
   inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }

   void buildRIG(ArrayList&);
   bool coalesce(ArrayList&);
   bool doCoalesce(ArrayList&, unsigned int mask);
   void calculateSpillWeights();
   bool simplify();
   bool selectRegisters();
   void cleanup(const bool success);

   void simplifyEdge(RIG_Node *, RIG_Node *);
   void simplifyNode(RIG_Node *);

   bool coalesceValues(Value *, Value *, bool force);
   void resolveSplitsAndMerges();
   void makeCompound(Instruction *, bool isSplit);

   inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);

   inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
   void checkList(std::list<RIG_Node *>&);

private:
   std::stack<uint32_t> stack;

   // list headers for simplify() phase
   RIG_Node lo[2];
   RIG_Node hi;

   Graph RIG;
   RIG_Node *nodes;
   unsigned int nodeCount;

   Function *func;
   Program *prog;

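   // relDegree[i][j] is i rounded up to a multiple of j, i.e. a conservative
   // measure of how much allocation space a neighbouring node of i units can
   // take away from a node needing j aligned units (see addInterference()).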
   struct RelDegree {
      uint8_t data[17][17];

      RelDegree() {
         for (int i = 1; i <= 16; ++i)
            for (int j = 1; j <= 16; ++j)
               data[i][j] = j * ((i + j - 1) / j);
      }

      const uint8_t* operator[](std::size_t i) const {
         return data[i];
      }
   };

   static const RelDegree relDegree;

   RegisterSet regs;

   // need to fixup register id for participants of OP_MERGE/SPLIT
   std::list<Instruction *> merges;
   std::list<Instruction *> splits;

   SpillCodeInserter& spill;
   std::list<ValuePair> mustSpill;
};

const GCRA::RelDegree GCRA::relDegree;

GCRA::RIG_Node::RIG_Node() : Node(NULL), next(this), prev(this)
{
   colors = 0;
}

void
GCRA::printNodeInfo() const
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      if (!nodes[i].colors)
         continue;
      INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
           i,
           nodes[i].f, nodes[i].reg, nodes[i].colors,
           nodes[i].weight,
           nodes[i].degree, nodes[i].degreeLimit);

      for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      INFO("\n");
   }
}

static bool
isShortRegOp(Instruction *insn)
{
   // Immediates are always in src1 (except zeroes, which end up getting
   // replaced with a zero reg). Every other situation can be resolved by
   // using a long encoding.
   return insn->srcExists(1) && insn->src(1).getFile() == FILE_IMMEDIATE &&
      insn->getSrc(1)->reg.data.u64;
}

// Check if this LValue is ever used in an instruction that can't be encoded
// with long registers (i.e. > r63)
static bool
isShortRegVal(LValue *lval)
{
   if (lval->getInsn() == NULL)
      return false;
   for (Value::DefCIterator def = lval->defs.begin();
        def != lval->defs.end(); ++def)
      if (isShortRegOp((*def)->getInsn()))
         return true;
   for (Value::UseCIterator use = lval->uses.begin();
        use != lval->uses.end(); ++use)
      if (isShortRegOp((*use)->getInsn()))
         return true;
   return false;
}

void
GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
{
   setValue(lval);
   if (lval->reg.data.id >= 0)
      lval->noSpill = lval->fixedReg = 1;

   colors = regs.units(lval->reg.file, lval->reg.size);
   f = lval->reg.file;
   reg = -1;
   if (lval->reg.data.id >= 0)
      reg = regs.idToUnits(lval);

   weight = std::numeric_limits<float>::infinity();
   degree = 0;
   maxReg = regs.getFileSize(f);
   // On nv50, we lose a bit of gpr encoding when there's an embedded
   // immediate.
   if (regs.restrictedGPR16Range && f == FILE_GPR &&
       (lval->reg.size == 2 || isShortRegVal(lval)))
      maxReg /= 2;
   degreeLimit = maxReg;
   degreeLimit -= relDegree[1][colors] - 1;

   livei.insert(lval->livei);
}

bool
GCRA::coalesceValues(Value *dst, Value *src, bool force)
{
   LValue *rep = dst->join->asLValue();
   LValue *val = src->join->asLValue();

   if (!force && val->reg.data.id >= 0) {
      rep = src->join->asLValue();
      val = dst->join->asLValue();
   }
   RIG_Node *nRep = &nodes[rep->id];
   RIG_Node *nVal = &nodes[val->id];

   if (src->reg.file != dst->reg.file) {
      if (!force)
         return false;
      WARN("forced coalescing of values in different files !\n");
   }
   if (!force && dst->reg.size != src->reg.size)
      return false;

   if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
      if (force) {
         if (val->reg.data.id >= 0)
            WARN("forced coalescing of values in different fixed regs !\n");
      } else {
         if (val->reg.data.id >= 0)
            return false;
         // make sure that there is no overlap with the fixed register of rep
         for (ArrayList::Iterator it = func->allLValues.iterator();
              !it.end(); it.next()) {
            Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
            assert(reg);
            if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
               return false;
         }
      }
   }

   if (!force && nRep->livei.overlaps(nVal->livei))
      return false;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
            rep->id, rep->reg.data.id, val->id);

   // set join pointer of all values joined with val
   for (Value::DefIterator def = val->defs.begin(); def != val->defs.end();
        ++def)
      (*def)->get()->join = rep;
   assert(rep->join == rep && val->join == rep);

   // add val's definitions to rep and extend the live interval of its RIG node
   rep->defs.insert(rep->defs.end(), val->defs.begin(), val->defs.end());
   nRep->livei.unify(nVal->livei);
   nRep->degreeLimit = MIN2(nRep->degreeLimit, nVal->degreeLimit);
   nRep->maxReg = MIN2(nRep->maxReg, nVal->maxReg);
   return true;
}

bool
GCRA::coalesce(ArrayList& insns)
{
   bool ret = doCoalesce(insns, JOIN_MASK_PHI);
   if (!ret)
      return false;
   switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
   case 0x50:
   case 0x80:
   case 0x90:
   case 0xa0:
      ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
      break;
   case 0xc0:
   case 0xd0:
   case 0xe0:
   case 0xf0:
   case 0x100:
   case 0x110:
   case 0x120:
   case 0x130:
   case 0x140:
   case 0x160:
      ret = doCoalesce(insns, JOIN_MASK_UNION);
      break;
   default:
      break;
   }
   if (!ret)
      return false;
   return doCoalesce(insns, JOIN_MASK_MOV);
}

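// Return the mask of 32-bit components covered by [base, base + size) within
// a compound of compSize components, e.g. makeCompMask(4, 2, 1) = 0x44.
// For compSize <= 4 the mask is replicated into the upper nibble, presumably
// so it stays valid for either aligned position within an 8-unit group
// (cf. RIG_Node::getCompMask(), which shifts by reg & 7).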
static inline uint8_t makeCompMask(int compSize, int base, int size)
{
   uint8_t m = ((1 << size) - 1) << base;

   switch (compSize) {
   case 1:
      return 0xff;
   case 2:
      m |= (m << 2);
      return (m << 4) | m;
   case 3:
   case 4:
      return (m << 4) | m;
   default:
      assert(compSize <= 8);
      return m;
   }
}

// Used when coalescing moves. The non-compound value will become one, e.g.:
// mov b32 $r0 $r2 / merge b64 $r0d { $r0 $r1 }
// split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
static inline void copyCompound(Value *dst, Value *src)
{
   LValue *ldst = dst->asLValue();
   LValue *lsrc = src->asLValue();

   if (ldst->compound && !lsrc->compound) {
      LValue *swap = lsrc;
      lsrc = ldst;
      ldst = swap;
   }

   ldst->compound = lsrc->compound;
   ldst->compMask = lsrc->compMask;
}

void
GCRA::makeCompound(Instruction *insn, bool split)
{
   LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("makeCompound(split = %i): ", split);
      insn->print();
   }

   const unsigned int size = getNode(rep)->colors;
   unsigned int base = 0;

   if (!rep->compound)
      rep->compMask = 0xff;
   rep->compound = 1;

   for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
      LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();

      val->compound = 1;
      if (!val->compMask)
         val->compMask = 0xff;
      val->compMask &= makeCompMask(size, base, getNode(val)->colors);
      assert(val->compMask);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
               rep->id, rep->compMask, val->id, val->compMask);

      base += getNode(val)->colors;
   }
   assert(base == size);
}

bool
GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
{
   int c, n;

   for (n = 0; n < insns.getSize(); ++n) {
      Instruction *i;
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));

      switch (insn->op) {
      case OP_PHI:
         if (!(mask & JOIN_MASK_PHI))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
               // this is bad
               ERROR("failed to coalesce phi operands\n");
               return false;
            }
         break;
      case OP_UNION:
      case OP_MERGE:
         if (!(mask & JOIN_MASK_UNION))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            coalesceValues(insn->getDef(0), insn->getSrc(c), true);
         if (insn->op == OP_MERGE) {
            merges.push_back(insn);
            if (insn->srcExists(1))
               makeCompound(insn, false);
         }
         break;
      case OP_SPLIT:
         if (!(mask & JOIN_MASK_UNION))
            break;
         splits.push_back(insn);
         for (c = 0; insn->defExists(c); ++c)
            coalesceValues(insn->getSrc(0), insn->getDef(c), true);
         makeCompound(insn, true);
         break;
      case OP_MOV:
         if (!(mask & JOIN_MASK_MOV))
            break;
         i = NULL;
         if (!insn->getDef(0)->uses.empty())
            i = (*insn->getDef(0)->uses.begin())->getInsn();
         // if this is a constraint-move there will only be a single use
         if (i && i->op == OP_MERGE) // do we really still need this ?
            break;
         i = insn->getSrc(0)->getUniqueInsn();
         if (i && !i->constrainedDefs()) {
            if (coalesceValues(insn->getDef(0), insn->getSrc(0), false))
               copyCompound(insn->getSrc(0), insn->getDef(0));
         }
         break;
      case OP_TEX:
      case OP_TXB:
      case OP_TXL:
      case OP_TXF:
      case OP_TXQ:
      case OP_TXD:
      case OP_TXG:
      case OP_TXLQ:
      case OP_TEXCSAA:
      case OP_TEXPREP:
         if (!(mask & JOIN_MASK_TEX))
            break;
         for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
            coalesceValues(insn->getDef(c), insn->getSrc(c), true);
         break;
      default:
         break;
      }
   }
   return true;
}

void
GCRA::RIG_Node::addInterference(RIG_Node *node)
{
   this->degree += relDegree[node->colors][colors];
   node->degree += relDegree[colors][node->colors];

   this->attach(node, Graph::Edge::CROSS);
}

void
GCRA::RIG_Node::addRegPreference(RIG_Node *node)
{
   prefRegs.push_back(node);
}

GCRA::GCRA(Function *fn, SpillCodeInserter& spill) :
   nodes(NULL),
   nodeCount(0),
   func(fn),
   regs(fn->getProgram()->getTarget()),
   spill(spill)
{
   prog = func->getProgram();
}

GCRA::~GCRA()
{
   if (nodes)
      delete[] nodes;
}

void
GCRA::checkList(std::list<RIG_Node *>& lst)
{
   GCRA::RIG_Node *prev = NULL;

   for (std::list<RIG_Node *>::iterator it = lst.begin();
        it != lst.end();
        ++it) {
      assert((*it)->getValue()->join == (*it)->getValue());
      if (prev)
         assert(prev->livei.begin() <= (*it)->livei.begin());
      prev = *it;
   }
}

void
GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
{
   if (node->livei.isEmpty())
      return;
   // only the intervals of joined values don't necessarily arrive in order
   std::list<RIG_Node *>::iterator prev, it;
   for (it = list.end(); it != list.begin(); it = prev) {
      prev = it;
      --prev;
      if ((*prev)->livei.begin() <= node->livei.begin())
         break;
   }
   list.insert(it, node);
}

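// Build the interference graph with a linear sweep: the RIG nodes arrive
// ordered by the start of their live interval, and an "active" list holds
// the intervals whose end has not been passed yet. Only values in the same
// register file with overlapping live intervals interfere.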
void
GCRA::buildRIG(ArrayList& insns)
{
   std::list<RIG_Node *> values, active;

   for (std::deque<ValueDef>::iterator it = func->ins.begin();
        it != func->ins.end(); ++it)
      insertOrderedTail(values, getNode(it->get()->asLValue()));

   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
      for (int d = 0; insn->defExists(d); ++d)
         if (insn->getDef(d)->rep() == insn->getDef(d))
            insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
   }
   checkList(values);

   while (!values.empty()) {
      RIG_Node *cur = values.front();

      for (std::list<RIG_Node *>::iterator it = active.begin();
           it != active.end();) {
         RIG_Node *node = *it;

         if (node->livei.end() <= cur->livei.begin()) {
            it = active.erase(it);
         } else {
            if (node->f == cur->f && node->livei.overlaps(cur->livei))
               cur->addInterference(node);
            ++it;
         }
      }
      values.pop_front();
      active.push_back(cur);
   }
}

void
GCRA::calculateSpillWeights()
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      RIG_Node *const n = &nodes[i];
      if (!nodes[i].colors || nodes[i].livei.isEmpty())
         continue;
      if (nodes[i].reg >= 0) {
         // update max reg
         regs.occupy(n->f, n->reg, n->colors);
         continue;
      }
      LValue *val = nodes[i].getValue();

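      // Spill weight: (total ref count of all coalesced defs)^2 divided by
      // the extent of the live interval. simplify() spills the candidate
      // with the lowest weight/degree ratio first, i.e. rarely used,
      // long-lived, heavily interfering values are preferred.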
      if (!val->noSpill) {
         int rc = 0;
         for (Value::DefIterator it = val->defs.begin();
              it != val->defs.end();
              ++it)
            rc += (*it)->get()->refCount();

         nodes[i].weight =
            (float)rc * (float)rc / (float)nodes[i].livei.extent();
      }

      if (nodes[i].degree < nodes[i].degreeLimit) {
         int l = 0;
         if (val->reg.size > 4)
            l = 1;
         DLLIST_ADDHEAD(&lo[l], &nodes[i]);
      } else {
         DLLIST_ADDHEAD(&hi, &nodes[i]);
      }
   }
   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      printNodeInfo();
}

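// Simplify phase: repeatedly remove nodes from the RIG and push them onto the
// selection stack. Removing a node lowers its neighbours' effective degrees;
// a neighbour that drops below its degree limit becomes trivially colourable
// and is moved back onto one of the low-degree lists.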
void
GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
{
   bool move = b->degree >= b->degreeLimit;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
            a->getValue()->id, a->degree, a->degreeLimit,
            b->getValue()->id, b->degree, b->degreeLimit);

   b->degree -= relDegree[a->colors][b->colors];

   move = move && b->degree < b->degreeLimit;
   if (move && !DLLIST_EMPTY(b)) {
      int l = (b->getValue()->reg.size > 4) ? 1 : 0;
      DLLIST_DEL(b);
      DLLIST_ADDTAIL(&lo[l], b);
   }
}

void
GCRA::simplifyNode(RIG_Node *node)
{
   for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   DLLIST_DEL(node);
   stack.push(node->getValue()->id);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
            node->getValue()->id,
            (node->degree < node->degreeLimit) ? "" : "(spill)");
}

bool
GCRA::simplify()
{
   for (;;) {
      if (!DLLIST_EMPTY(&lo[0])) {
         do {
            simplifyNode(lo[0].next);
         } while (!DLLIST_EMPTY(&lo[0]));
      } else
      if (!DLLIST_EMPTY(&lo[1])) {
         simplifyNode(lo[1].next);
      } else
      if (!DLLIST_EMPTY(&hi)) {
         RIG_Node *best = hi.next;
         unsigned bestMaxReg = best->maxReg;
         float bestScore = best->weight / (float)best->degree;
         // Spill candidate. First go through the ones with the highest max
         // register, then the ones with lower. That way the ones with the
         // lowest requirement will be allocated first, since it's a stack.
         for (RIG_Node *it = best->next; it != &hi; it = it->next) {
            float score = it->weight / (float)it->degree;
            if (score < bestScore || it->maxReg > bestMaxReg) {
               best = it;
               bestScore = score;
               bestMaxReg = it->maxReg;
            }
         }
         if (isinf(bestScore)) {
            ERROR("no viable spill candidates left\n");
            return false;
         }
         simplifyNode(best);
      } else {
         return true;
      }
   }
}

void
GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
{
   const RIG_Node *intf = RIG_Node::get(ei);

   if (intf->reg < 0)
      return;
   const LValue *vA = node->getValue();
   const LValue *vB = intf->getValue();

   const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);

   if (vA->compound | vB->compound) {
      // NOTE: this only works for >aligned< register tuples !
      for (Value::DefCIterator D = vA->defs.begin(); D != vA->defs.end(); ++D) {
         for (Value::DefCIterator d = vB->defs.begin(); d != vB->defs.end(); ++d) {
            const LValue *vD = (*D)->get()->asLValue();
            const LValue *vd = (*d)->get()->asLValue();

            if (!vD->livei.overlaps(vd->livei)) {
               INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
                        vD->id, vd->id);
               continue;
            }

            uint8_t mask = vD->compound ? vD->compMask : ~0;
            if (vd->compound) {
               assert(vB->compound);
               mask &= vd->compMask & vB->compMask;
            } else {
               mask &= intfMask;
            }

            INFO_DBG(prog->dbgFlags, REG_ALLOC,
                     "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
                     vD->id,
                     vD->compound ? vD->compMask : 0xff,
                     vd->id,
                     vd->compound ? vd->compMask : intfMask,
                     vB->compMask, intf->reg & ~7, mask);
            if (mask)
               regs.occupyMask(node->f, intf->reg & ~7, mask);
         }
      }
   } else {
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "(%%%i) X (%%%i): $r%i + %u\n",
               vA->id, vB->id, intf->reg, intf->colors);
      regs.occupy(node->f, intf->reg, intf->colors);
   }
}

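// Select phase: pop nodes off the stack in reverse simplification order.
// For each node, mark the units of all already-coloured neighbours as
// occupied, try one of the preferred registers (e.g. src2 of a MAD) first,
// and otherwise take the first free range. Nodes that cannot be coloured
// are queued up for spilling.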
bool
GCRA::selectRegisters()
{
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");

   while (!stack.empty()) {
      RIG_Node *node = &nodes[stack.top()];
      stack.pop();

      regs.reset(node->f);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
               node->getValue()->id, node->colors);

      for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
         checkInterference(node, ei);
      for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
         checkInterference(node, ei);

      if (!node->prefRegs.empty()) {
         for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
              it != node->prefRegs.end();
              ++it) {
            if ((*it)->reg >= 0 &&
                regs.testOccupy(node->f, (*it)->reg, node->colors)) {
               node->reg = (*it)->reg;
               break;
            }
         }
      }
      if (node->reg >= 0)
         continue;
      LValue *lval = node->getValue();
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         regs.print(node->f);
      bool ret = regs.assign(node->reg, node->f, node->colors, node->maxReg);
      if (ret) {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
         lval->compMask = node->getCompMask();
      } else {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
                  lval->id, lval->reg.size);
         Symbol *slot = NULL;
         if (lval->reg.file == FILE_GPR)
            slot = spill.assignSlot(node->livei, lval->reg.size);
         mustSpill.push_back(ValuePair(lval, slot));
      }
   }
   if (!mustSpill.empty())
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = nodes[i].getValue();
      if (nodes[i].reg >= 0 && nodes[i].colors > 0)
         lval->reg.data.id =
            regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
   }
   return true;
}

bool
GCRA::allocateRegisters(ArrayList& insns)
{
   bool ret;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "allocateRegisters to %u instructions\n", insns.getSize());

   nodeCount = func->allLValues.getSize();
   nodes = new RIG_Node[nodeCount];
   if (!nodes)
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
      if (lval) {
         nodes[i].init(regs, lval);
         RIG.insert(&nodes[i]);

         if (lval->inFile(FILE_GPR) && lval->getInsn() != NULL) {
            Instruction *insn = lval->getInsn();
            if (insn->op != OP_MAD && insn->op != OP_FMA && insn->op != OP_SAD)
               continue;
            // For both of the cases below, we only want to add the preference
            // if all arguments are in registers.
            if (insn->src(0).getFile() != FILE_GPR ||
                insn->src(1).getFile() != FILE_GPR ||
                insn->src(2).getFile() != FILE_GPR)
               continue;
            if (prog->getTarget()->getChipset() < 0xc0) {
               // Outputting a flag is not supported with short encodings nor
               // with immediate arguments.
               // See handleMADforNV50.
               if (insn->flagsDef >= 0)
                  continue;
            } else {
               // We can only fold immediate arguments if dst == src2. This
               // only matters if one of the first two arguments is an
               // immediate. This form is also only supported for floats.
               // See handleMADforNVC0.
               ImmediateValue imm;
               if (insn->dType != TYPE_F32)
                  continue;
               if (!insn->src(0).getImmediate(imm) &&
                   !insn->src(1).getImmediate(imm))
                  continue;
            }

            nodes[i].addRegPreference(getNode(insn->getSrc(2)->asLValue()));
         }
      }
   }

   // coalesce first, we use only 1 RIG node for a group of joined values
   ret = coalesce(insns);
   if (!ret)
      goto out;

   if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      func->printLiveIntervals();

   buildRIG(insns);
   calculateSpillWeights();
   ret = simplify();
   if (!ret)
      goto out;

   ret = selectRegisters();
   if (!ret) {
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "selectRegisters failed, inserting spill code ...\n");
      regs.reset(FILE_GPR, true);
      spill.run(mustSpill);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();
   } else {
      prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
   }

out:
   cleanup(ret);
   return ret;
}

void
GCRA::cleanup(const bool success)
{
   mustSpill.clear();

   for (ArrayList::Iterator it = func->allLValues.iterator();
        !it.end(); it.next()) {
      LValue *lval = reinterpret_cast<LValue *>(it.get());

      lval->livei.clear();

      lval->compound = 0;
      lval->compMask = 0;

      if (lval->join == lval)
         continue;

      if (success) {
         lval->reg.data.id = lval->join->reg.data.id;
      } else {
         for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
              ++d)
            lval->join->defs.remove(*d);
         lval->join = lval;
      }
   }

   if (success)
      resolveSplitsAndMerges();
   splits.clear(); // avoid duplicate entries on next coalesce pass
   merges.clear();

   delete[] nodes;
   nodes = NULL;
   hi.next = hi.prev = &hi;
   lo[0].next = lo[0].prev = &lo[0];
   lo[1].next = lo[1].prev = &lo[1];
}

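// Assign a stack slot to a spilled live range. Existing slots are scanned
// first-fit: a slot may be shared if none of its resident live intervals
// overlaps the incoming one; otherwise the stack is grown, aligned to the
// slot size.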
1594 Symbol *
1595 SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
1596 {
1597 SpillSlot slot;
1598 int32_t offsetBase = stackSize;
1599 int32_t offset;
1600 std::list<SpillSlot>::iterator pos = slots.end(), it = slots.begin();
1601
1602 if (offsetBase % size)
1603 offsetBase += size - (offsetBase % size);
1604
1605 slot.sym = NULL;
1606
1607 for (offset = offsetBase; offset < stackSize; offset += size) {
1608 const int32_t entryEnd = offset + size;
1609 while (it != slots.end() && it->offset < offset)
1610 ++it;
1611 if (it == slots.end()) // no slots left
1612 break;
1613 std::list<SpillSlot>::iterator bgn = it;
1614
1615 while (it != slots.end() && it->offset < entryEnd) {
1616 it->occup.print();
1617 if (it->occup.overlaps(livei))
1618 break;
1619 ++it;
1620 }
1621 if (it == slots.end() || it->offset >= entryEnd) {
1622 // fits
1623 for (; bgn != slots.end() && bgn->offset < entryEnd; ++bgn) {
1624 bgn->occup.insert(livei);
1625 if (bgn->size() == size)
1626 slot.sym = bgn->sym;
1627 }
1628 break;
1629 }
1630 }
1631 if (!slot.sym) {
1632 stackSize = offset + size;
1633 slot.offset = offset;
1634 slot.sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
1635 if (!func->stackPtr)
1636 offset += func->tlsBase;
1637 slot.sym->setAddress(NULL, offset);
1638 slot.sym->reg.size = size;
1639 slots.insert(pos, slot)->occup.insert(livei);
1640 }
1641 return slot.sym;
1642 }
1643
1644 Value *
1645 SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
1646 {
1647 if (!lval->compound || (lval->compMask & 0x1))
1648 return base;
1649 Value *slot = cloneShallow(func, base);
1650
1651 slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
1652 slot->reg.size = lval->reg.size;
1653
1654 return slot;
1655 }
1656
1657 void
1658 SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
1659 {
1660 const DataType ty = typeOfSize(lval->reg.size);
1661
1662 slot = offsetSlot(slot, lval);
1663
1664 Instruction *st;
1665 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1666 lval->noSpill = 1;
1667 if (ty != TYPE_B96) {
1668 st = new_Instruction(func, OP_STORE, ty);
1669 st->setSrc(0, slot);
1670 st->setSrc(1, lval);
1671 } else {
1672 st = new_Instruction(func, OP_SPLIT, ty);
1673 st->setSrc(0, lval);
1674 for (int d = 0; d < lval->reg.size / 4; ++d)
1675 st->setDef(d, new_LValue(func, FILE_GPR));
1676
1677 for (int d = lval->reg.size / 4 - 1; d >= 0; --d) {
1678 Value *tmp = cloneShallow(func, slot);
1679 tmp->reg.size = 4;
1680 tmp->reg.data.offset += 4 * d;
1681
1682 Instruction *s = new_Instruction(func, OP_STORE, TYPE_U32);
1683 s->setSrc(0, tmp);
1684 s->setSrc(1, st->getDef(d));
1685 defi->bb->insertAfter(defi, s);
1686 }
1687 }
1688 } else {
1689 st = new_Instruction(func, OP_CVT, ty);
1690 st->setDef(0, slot);
1691 st->setSrc(0, lval);
1692 if (lval->reg.file == FILE_FLAGS)
1693 st->flagsSrc = 0;
1694 }
1695 defi->bb->insertAfter(defi, st);
1696 }
1697
1698 LValue *
1699 SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
1700 {
1701 const DataType ty = typeOfSize(lval->reg.size);
1702
1703 slot = offsetSlot(slot, lval);
1704 lval = cloneShallow(func, lval);
1705
1706 Instruction *ld;
1707 if (slot->reg.file == FILE_MEMORY_LOCAL) {
1708 lval->noSpill = 1;
1709 if (ty != TYPE_B96) {
1710 ld = new_Instruction(func, OP_LOAD, ty);
1711 } else {
1712 ld = new_Instruction(func, OP_MERGE, ty);
1713 for (int d = 0; d < lval->reg.size / 4; ++d) {
1714 Value *tmp = cloneShallow(func, slot);
1715 LValue *val;
1716 tmp->reg.size = 4;
1717 tmp->reg.data.offset += 4 * d;
1718
1719 Instruction *l = new_Instruction(func, OP_LOAD, TYPE_U32);
1720 l->setDef(0, (val = new_LValue(func, FILE_GPR)));
1721 l->setSrc(0, tmp);
1722 usei->bb->insertBefore(usei, l);
1723 ld->setSrc(d, val);
1724 val->noSpill = 1;
1725 }
1726 ld->setDef(0, lval);
1727 usei->bb->insertBefore(usei, ld);
1728 return lval;
1729 }
1730 } else {
1731 ld = new_Instruction(func, OP_CVT, ty);
1732 }
1733 ld->setDef(0, lval);
1734 ld->setSrc(0, slot);
1735 if (lval->reg.file == FILE_FLAGS)
1736 ld->flagsDef = 0;
1737
1738 usei->bb->insertBefore(usei, ld);
1739 return lval;
1740 }
1741
1742 static bool
1743 value_cmp(ValueRef *a, ValueRef *b) {
1744 Instruction *ai = a->getInsn(), *bi = b->getInsn();
1745 if (ai->bb != bi->bb)
1746 return ai->bb->getId() < bi->bb->getId();
1747 return ai->serial < bi->serial;
1748 }
1749
1750 // For each value that is to be spilled, go through all its definitions.
1751 // A value can have multiple definitions if it has been coalesced before.
1752 // For each definition, first go through all its uses and insert an unspill
1753 // instruction before it, then replace the use with the temporary register.
1754 // Unspill can be either a load from memory or simply a move to another
1755 // register file.
1756 // For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
1757 // if we have spilled to a memory location, or simply with the new register.
1758 // No load or conversion instruction should be needed.
1759 bool
1760 SpillCodeInserter::run(const std::list<ValuePair>& lst)
1761 {
1762 for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
1763 ++it) {
1764 LValue *lval = it->first->asLValue();
1765 Symbol *mem = it->second ? it->second->asSym() : NULL;
1766
1767 // Keep track of which instructions to delete later. Deleting them
1768 // inside the loop is unsafe since a single instruction may have
1769 // multiple destinations that all need to be spilled (like OP_SPLIT).
1770 unordered_set<Instruction *> to_del;
1771
1772 for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
1773 ++d) {
1774 Value *slot = mem ?
1775 static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
1776 Value *tmp = NULL;
1777 Instruction *last = NULL;
1778
1779 LValue *dval = (*d)->get()->asLValue();
1780 Instruction *defi = (*d)->getInsn();
1781
1782 // Sort all the uses by BB/instruction so that we don't unspill
1783 // multiple times in a row, and also remove a source of
1784 // non-determinism.
1785 std::vector<ValueRef *> refs(dval->uses.begin(), dval->uses.end());
1786 std::sort(refs.begin(), refs.end(), value_cmp);
1787
1788 // Unspill at each use *before* inserting spill instructions,
1789 // we don't want to have the spill instructions in the use list here.
1790 for (std::vector<ValueRef*>::const_iterator it = refs.begin();
1791 it != refs.end(); ++it) {
1792 ValueRef *u = *it;
1793 Instruction *usei = u->getInsn();
1794 assert(usei);
1795 if (usei->isPseudo()) {
1796 tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
1797 last = NULL;
1798 } else {
1799 if (!last || (usei != last->next && usei != last))
1800 tmp = unspill(usei, dval, slot);
1801 last = usei;
1802 }
1803 u->set(tmp);
1804 }
1805
1806 assert(defi);
1807 if (defi->isPseudo()) {
1808 d = lval->defs.erase(d);
1809 --d;
1810 if (slot->reg.file == FILE_MEMORY_LOCAL)
1811 to_del.insert(defi);
1812 else
1813 defi->setDef(0, slot);
1814 } else {
1815 spill(defi, slot, dval);
1816 }
1817 }
1818
1819 for (unordered_set<Instruction *>::const_iterator it = to_del.begin();
1820 it != to_del.end(); ++it)
1821 delete_Instruction(func->getProgram(), *it);
1822 }
1823
1824 // TODO: We're not trying to reuse old slots in a potential next iteration.
1825 // We have to update the slots' livei intervals to be able to do that.
1826 stackBase = stackSize;
1827 slots.clear();
1828 return true;
1829 }
1830
1831 bool
1832 RegAlloc::exec()
1833 {
1834 for (IteratorRef it = prog->calls.iteratorDFS(false);
1835 !it->end(); it->next()) {
1836 func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));
1837
1838 func->tlsBase = prog->tlsSize;
1839 if (!execFunc())
1840 return false;
1841 prog->tlsSize += func->tlsSize;
1842 }
1843 return true;
1844 }
1845
1846 bool
1847 RegAlloc::execFunc()
1848 {
1849 InsertConstraintsPass insertConstr;
1850 PhiMovesPass insertPhiMoves;
1851 ArgumentMovesPass insertArgMoves;
1852 BuildIntervalsPass buildIntervals;
1853 SpillCodeInserter insertSpills(func);
1854
1855 GCRA gcra(func, insertSpills);
1856
1857 unsigned int i, retries;
1858 bool ret;
1859
1860 if (!func->ins.empty()) {
1861 // Insert a nop at the entry so inputs only used by the first instruction
1862 // don't count as having an empty live range.
1863 Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
1864 BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
1865 }
1866
1867 ret = insertConstr.exec(func);
1868 if (!ret)
1869 goto out;
1870
1871 ret = insertPhiMoves.run(func);
1872 if (!ret)
1873 goto out;
1874
1875 ret = insertArgMoves.run(func);
1876 if (!ret)
1877 goto out;
1878
1879 // TODO: need to fix up spill slot usage ranges to support > 1 retry
1880 for (retries = 0; retries < 3; ++retries) {
1881 if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
1882 INFO("Retry: %i\n", retries);
1883 if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
1884 func->print();
1885
1886 // spilling to registers may add live ranges, need to rebuild everything
1887 ret = true;
1888 for (sequence = func->cfg.nextSequence(), i = 0;
1889 ret && i <= func->loopNestingBound;
1890 sequence = func->cfg.nextSequence(), ++i)
1891 ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
1892 // reset marker
1893 for (ArrayList::Iterator bi = func->allBBlocks.iterator();
1894 !bi.end(); bi.next())
1895 BasicBlock::get(bi)->liveSet.marker = false;
1896 if (!ret)
1897 break;
1898 func->orderInstructions(this->insns);
1899
1900 ret = buildIntervals.run(func);
1901 if (!ret)
1902 break;
1903 ret = gcra.allocateRegisters(insns);
1904 if (ret)
1905 break; // success
1906 }
1907 INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);
1908
1909 func->tlsSize = insertSpills.getStackSize();
1910 out:
1911 return ret;
1912 }
1913
// TODO: check if modifying Instruction::join here breaks anything
void
GCRA::resolveSplitsAndMerges()
{
   for (std::list<Instruction *>::iterator it = splits.begin();
        it != splits.end();
        ++it) {
      Instruction *split = *it;
      unsigned int reg = regs.idToBytes(split->getSrc(0));
      for (int d = 0; split->defExists(d); ++d) {
         Value *v = split->getDef(d);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         reg += v->reg.size;
      }
   }
   splits.clear();

   for (std::list<Instruction *>::iterator it = merges.begin();
        it != merges.end();
        ++it) {
      Instruction *merge = *it;
      unsigned int reg = regs.idToBytes(merge->getDef(0));
      for (int s = 0; merge->srcExists(s); ++s) {
         Value *v = merge->getSrc(s);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         // If the value is defined by a phi/union node, we also need to
         // perform the same fixup on that node's sources, since after RA
         // their registers should be identical.
         if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
            Instruction *phi = v->getInsn();
            for (int phis = 0; phi->srcExists(phis); ++phis) {
               phi->getSrc(phis)->join = v;
               phi->getSrc(phis)->reg.data.id = v->reg.data.id;
            }
         }
         reg += v->reg.size;
      }
   }
   merges.clear();
}

bool Program::registerAllocation()
{
   RegAlloc ra(this);
   return ra.exec();
}

bool
RegAlloc::InsertConstraintsPass::exec(Function *ir)
{
   constrList.clear();

   bool ret = run(ir, true, true);
   if (ret)
      ret = insertConstraintMoves();
   return ret;
}

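// Hypothetical example: a tex with tex.mask = 0xf whose y and w results have
// no uses is reduced by textureMask() to tex.mask = 0x5 with just the two
// live defs, so RA doesn't have to reserve registers for dead channels.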
// TODO: make part of texture insn
void
RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
{
   Value *def[4];
   int c, k, d;
   uint8_t mask = 0;

   for (d = 0, k = 0, c = 0; c < 4; ++c) {
      if (!(tex->tex.mask & (1 << c)))
         continue;
      if (tex->getDef(k)->refCount()) {
         mask |= 1 << c;
         def[d++] = tex->getDef(k);
      }
      ++k;
   }
   tex->tex.mask = mask;

   for (c = 0; c < d; ++c)
      tex->setDef(c, def[c]);
   for (; c < 4; ++c)
      tex->setDef(c, NULL);
}

bool
RegAlloc::InsertConstraintsPass::detectConflict(Instruction *cst, int s)
{
   Value *v = cst->getSrc(s);

   // current register allocation can't handle it if a value participates in
   // multiple constraints
   for (Value::UseIterator it = v->uses.begin(); it != v->uses.end(); ++it) {
      if (cst != (*it)->getInsn())
         return true;
   }

   // can start at s + 1 because detectConflict is called on all sources
   for (int c = s + 1; cst->srcExists(c); ++c)
      if (v == cst->getSrc(c))
         return true;

   Instruction *defi = v->getInsn();

   return (!defi || defi->constrainedDefs());
}

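// Sketch of the rewrite (operand names invented): addConstraint(i, 0, 3) on
//    op { %d } <- %s0 %s1 %s2
// produces
//    constraint { %c0 %c1 %c2 } <- %s0 %s1 %s2
//    op { %d } <- %c0 %c1 %c2
// so only the constraint op's defs need to end up in consecutive registers;
// an identical constraint in a dominating block is reused instead.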
void
RegAlloc::InsertConstraintsPass::addConstraint(Instruction *i, int s, int n)
{
   Instruction *cst;
   int d;

   // first, look for an existing identical constraint op
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      cst = (*it);
      if (!i->bb->dominatedBy(cst->bb))
         break;
      for (d = 0; d < n; ++d)
         if (cst->getSrc(d) != i->getSrc(d + s))
            break;
      if (d >= n) {
         for (d = 0; d < n; ++d, ++s)
            i->setSrc(s, cst->getDef(d));
         return;
      }
   }
   cst = new_Instruction(func, OP_CONSTRAINT, i->dType);

   for (d = 0; d < n; ++s, ++d) {
      cst->setDef(d, new_LValue(func, FILE_GPR));
      cst->setSrc(d, i->getSrc(s));
      i->setSrc(s, cst->getDef(d));
   }
   i->bb->insertBefore(i, cst);

   constrList.push_back(cst);
}

// Add a dummy use of the pointer source of >= 8 byte loads after the load
// to prevent it from being assigned a register overlapping the load's
// destination, which would produce random corruption.
void
RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
{
   Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
   hzd->setSrc(0, src->get());
   i->bb->insertAfter(i, hzd);
}
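
// E.g. (registers invented for this comment): for a 128-bit indirect load
//    ld b128 %vq g[%p]
// the nop's extra use keeps %p live past the load, so %p cannot share a GPR
// with the four registers backing %vq.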

// b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
{
   int n;
   for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n);
   condenseDefs(insn, 0, n - 1);
}

void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getDef(s)->reg.size;
   if (!size)
      return;

   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
   split->setSrc(0, lval);
   for (int d = a; d <= b; ++d) {
      split->setDef(d - a, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   insn->setDef(a, lval);

   for (int k = a + 1, d = b + 1; insn->defExists(d); ++d, ++k) {
      insn->setDef(k, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   // carry over predicate if any (mainly for OP_UNION uses)
   split->setPredicate(insn->cc, insn->getPredicate());

   insn->bb->insertAfter(insn, split);
   constrList.push_back(split);
}

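// Inverse of condenseDefs(), e.g. (illustrative):
//    b32 sources { %r0 %r1 %r2 %r3 } -> one b128 %r0q source, built by an
// OP_MERGE inserted right before the instruction.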
void
RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getSrc(s)->reg.size;
   if (!size)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Value *save[3];
   insn->takeExtraSources(0, save);

   Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
   merge->setDef(0, lval);
   for (int s = a, i = 0; s <= b; ++s, ++i) {
      merge->setSrc(i, insn->getSrc(s));
   }
   insn->moveSources(b + 1, a - b);
   insn->setSrc(a, lval);
   insn->bb->insertBefore(insn, merge);

   insn->putExtraSources(0, save);

   constrList.push_back(merge);
}

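// Note (assumed rationale, not stated in the original): tex.mask values 5
// and 6 select two non-adjacent result channels, which the scalar TEXS/TLDS
// destination encoding presumably cannot express, so they are rejected below.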
bool
RegAlloc::InsertConstraintsPass::isScalarTexGM107(TexInstruction *tex)
{
   if (tex->tex.sIndirectSrc >= 0 ||
       tex->tex.rIndirectSrc >= 0 ||
       tex->tex.derivAll)
      return false;

   if (tex->tex.mask == 5 || tex->tex.mask == 6)
      return false;

   switch (tex->op) {
   case OP_TEX:
   case OP_TXF:
   case OP_TXG:
   case OP_TXL:
      break;
   default:
      return false;
   }

   // legal variants:
   // TEXS.1D.LZ
   // TEXS.2D
   // TEXS.2D.LZ
   // TEXS.2D.LL
   // TEXS.2D.DC
   // TEXS.2D.LL.DC
   // TEXS.2D.LZ.DC
   // TEXS.A2D
   // TEXS.A2D.LZ
   // TEXS.A2D.LZ.DC
   // TEXS.3D
   // TEXS.3D.LZ
   // TEXS.CUBE
   // TEXS.CUBE.LL

   // TLDS.1D.LZ
   // TLDS.1D.LL
   // TLDS.2D.LZ
   // TLDS.2D.LZ.AOFFI
   // TLDS.2D.LZ.MZ
   // TLDS.2D.LL
   // TLDS.2D.LL.AOFFI
   // TLDS.A2D.LZ
   // TLDS.3D.LZ

   // TLD4S: all 2D/RECT variants and only offset

   switch (tex->op) {
   case OP_TEX:
      if (tex->tex.useOffsets)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_1D:
      case TEX_TARGET_2D_ARRAY_SHADOW:
         return tex->tex.levelZero;
      case TEX_TARGET_CUBE:
         return !tex->tex.levelZero;
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_ARRAY:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_3D:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
         return true;
      default:
         return false;
      }

   case OP_TXL:
      if (tex->tex.useOffsets)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
      case TEX_TARGET_CUBE:
         return true;
      default:
         return false;
      }

   case OP_TXF:
      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_1D:
         return !tex->tex.useOffsets;
      case TEX_TARGET_2D:
      case TEX_TARGET_RECT:
         return true;
      case TEX_TARGET_2D_ARRAY:
      case TEX_TARGET_2D_MS:
      case TEX_TARGET_3D:
         return !tex->tex.useOffsets && tex->tex.levelZero;
      default:
         return false;
      }

   case OP_TXG:
      if (tex->tex.useOffsets > 1)
         return false;
      if (tex->tex.mask != 0x3 && tex->tex.mask != 0xf)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_MS:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
         return true;
      default:
         return false;
      }

   default:
      return false;
   }
}

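// Illustrative outcome (register names invented): a scalar tex with four
// defs and four sources leaves here as
//    texs { b64 %d01 b64 %d23 } <- b64 %s01 b64 %s23
// with defs and srcs paired into 64-bit halves.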
void
RegAlloc::InsertConstraintsPass::handleScalarTexGM107(TexInstruction *tex)
{
   int defCount = tex->defCount(0xff);
   int srcCount = tex->srcCount(0xff);

   tex->tex.scalar = true;

   // 1. handle defs
   if (defCount > 3)
      condenseDefs(tex, 2, 3);
   if (defCount > 1)
      condenseDefs(tex, 0, 1);

   // 2. handle srcs
   // special case for TXF.A2D
   if (tex->op == OP_TXF && tex->tex.target == TEX_TARGET_2D_ARRAY) {
      assert(srcCount >= 3);
      condenseSrcs(tex, 1, 2);
   } else {
      if (srcCount > 3)
         condenseSrcs(tex, 2, 3);
      // only if we have more than 2 sources
      if (srcCount > 2)
         condenseSrcs(tex, 0, 1);
   }

   assert(!tex->defExists(2) && !tex->srcExists(2));
}

void
RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

   if (targ->getChipset() < NVISA_GV100_CHIPSET) {
      if (isScalarTexGM107(tex)) {
         handleScalarTexGM107(tex);
         return;
      }

      assert(!tex->tex.scalar);
      condenseDefs(tex);
   } else {
      if (isTextureOp(tex->op)) {
         int defCount = tex->defCount(0xff);
         if (defCount > 3)
            condenseDefs(tex, 2, 3);
         if (defCount > 1)
            condenseDefs(tex, 0, 1);
      } else {
         condenseDefs(tex);
      }
   }

   if (isSurfaceOp(tex->op)) {
      int s = tex->tex.target.getDim() +
         (tex->tex.target.isArray() || tex->tex.target.isCube());
      int n = 0;

      switch (tex->op) {
      case OP_SUSTB:
      case OP_SUSTP:
         n = 4;
         break;
      case OP_SUREDB:
      case OP_SUREDP:
         if (tex->subOp == NV50_IR_SUBOP_ATOM_CAS)
            n = 2;
         break;
      default:
         break;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1)
         condenseSrcs(tex, 1, n); // do not condense the tex handle
   } else
   if (isTextureOp(tex->op)) {
      if (tex->op != OP_TXQ) {
         s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
         if (tex->op == OP_TXD) {
            // Indirect handle belongs in the first arg
            if (tex->tex.rIndirectSrc >= 0)
               s++;
            if (!tex->tex.target.isArray() && tex->tex.useOffsets)
               s++;
         }
         n = tex->srcCount(0xff, true) - s;
         // TODO: Is this necessary? Perhaps the extra args just have to be
         // aligned to the same boundary as the first arg group, not
         // necessarily to 4. Unlike on Kepler, this requirement has not been
         // rigorously verified here.
         if (n > 0 && n < 3) {
            if (tex->srcExists(n + s)) // move potential predicate out of the way
               tex->moveSources(n + s, 3 - n);
            while (n < 3)
               tex->setSrc(s + n++, new_LValue(func, FILE_GPR));
         }
      } else {
         s = tex->srcCount(0xff, true);
         n = 0;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1) // NOTE: first call modified positions already
         condenseSrcs(tex, 1, n);
   }
}

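// On NVE0, sources beyond the first four (handle, lod/bias, dc, ...) are
// padded with fresh GPRs up to seven before being condensed into a second
// vector; the fixed group sizes here mirror how the Kepler tex encoding is
// assumed to consume its two register operands.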
void
RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
{
   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
      condenseSrcs(tex, 3, 6);
   } else
   if (isTextureOp(tex->op)) {
      int n = tex->srcCount(0xff, true);
      int s = n > 4 ? 4 : n;
      if (n > 4 && n < 7) {
         if (tex->srcExists(n)) // move potential predicate out of the way
            tex->moveSources(n, 7 - n);

         while (n < 7)
            tex->setSrc(n++, new_LValue(func, FILE_GPR));
      }
      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 4)
         condenseSrcs(tex, 1, n - s);
   }
}

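// Illustrative NVC0 case (operand names invented): for a 2D array txl
//    txl { %d... } <- %x %y %layer %lod
// s = 3 coordinates are condensed into one b96 source, while the single
// extra argument (n = 1) stays scalar.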
void
RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

   if (tex->op == OP_TXQ) {
      s = tex->srcCount(0xff);
      n = 0;
   } else if (isSurfaceOp(tex->op)) {
      s = tex->tex.target.getDim() + (tex->tex.target.isArray() || tex->tex.target.isCube());
      if (tex->op == OP_SUSTB || tex->op == OP_SUSTP)
         n = 4;
      else
         n = 0;
   } else {
      s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
      if (!tex->tex.target.isArray() &&
          (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
         ++s;
      if (tex->op == OP_TXD && tex->tex.useOffsets)
         ++s;
      n = tex->srcCount(0xff) - s;
      assert(n <= 4);
   }

   if (s > 1)
      condenseSrcs(tex, 0, s - 1);
   if (n > 1) // NOTE: first call modified positions already
      condenseSrcs(tex, 1, n);

   condenseDefs(tex);
}

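// On NV50 the source and def counts are first equalized with dummy values,
// then each side is condensed into a single wide operand. E.g. (invented):
//    tex { %d0 %d1 } <- %s0
// gains a dummy second source so both sides can become b64 vectors.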
void
RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
{
   Value *pred = tex->getPredicate();
   if (pred)
      tex->setPredicate(tex->cc, NULL);

   textureMask(tex);

   assert(tex->defExists(0) && tex->srcExists(0));
   // make src and def count match
   int c;
   for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
      if (!tex->srcExists(c))
         tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
      else
         insertConstraintMove(tex, c);
      if (!tex->defExists(c))
         tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
   }
   if (pred)
      tex->setPredicate(tex->cc, pred);
   condenseDefs(tex);
   condenseSrcs(tex, 0, c - 1);
}

// Insert constraint markers for instructions whose multiple sources must be
// located in consecutive registers.
bool
RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
{
   TexInstruction *tex;
   Instruction *next;
   int s, size;

   targ = bb->getProgram()->getTarget();

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if ((tex = i->asTex())) {
         switch (targ->getChipset() & ~0xf) {
         case 0x50:
         case 0x80:
         case 0x90:
         case 0xa0:
            texConstraintNV50(tex);
            break;
         case 0xc0:
         case 0xd0:
            texConstraintNVC0(tex);
            break;
         case 0xe0:
         case 0xf0:
         case 0x100:
            texConstraintNVE0(tex);
            break;
         case 0x110:
         case 0x120:
         case 0x130:
         case 0x140:
         case 0x160:
            texConstraintGM107(tex);
            break;
         default:
            break;
         }
      } else
      if (i->op == OP_EXPORT || i->op == OP_STORE) {
         for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
            assert(i->srcExists(s));
            size -= i->getSrc(s)->reg.size;
         }
         condenseSrcs(i, 1, s - 1);
      } else
      if (i->op == OP_LOAD || i->op == OP_VFETCH) {
         condenseDefs(i);
         if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(0));
         if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(1));
      } else
      if (i->op == OP_UNION ||
          i->op == OP_MERGE ||
          i->op == OP_SPLIT) {
         constrList.push_back(i);
      }
   }
   return true;
}

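// Sketch of the transform below (operand names invented): a constrained
// source that also has other constrained uses is split off via a copy,
//    merge %m <- ... %v ...
// becoming
//    mov %v' <- %v
//    merge %m <- ... %v' ...
// so each constraint gets its own value to place; for single-use immediates
// and non-indirect constant loads, the defining instruction is simply moved
// next to the constraint instead.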
void
RegAlloc::InsertConstraintsPass::insertConstraintMove(Instruction *cst, int s)
{
   const uint8_t size = cst->src(s).getSize();

   assert(cst->getSrc(s)->defs.size() == 1); // still SSA

   Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();

   bool imm = defi->op == OP_MOV &&
      defi->src(0).getFile() == FILE_IMMEDIATE;
   bool load = defi->op == OP_LOAD &&
      defi->src(0).getFile() == FILE_MEMORY_CONST &&
      !defi->src(0).isIndirect(0);
   // catch some cases where we don't really need MOVs
   if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs()) {
      if (imm || load) {
         // Move the defi right before the cst. No point in expanding
         // the range.
         defi->bb->remove(defi);
         cst->bb->insertBefore(cst, defi);
      }
      return;
   }

   LValue *lval = new_LValue(func, cst->src(s).getFile());
   lval->reg.size = size;

   Instruction *mov = new_Instruction(func, OP_MOV, typeOfSize(size));
   mov->setDef(0, lval);
   mov->setSrc(0, cst->getSrc(s));

   if (load) {
      mov->op = OP_LOAD;
      mov->setSrc(0, defi->getSrc(0));
   } else if (imm) {
      mov->setSrc(0, defi->getSrc(0));
   }

   if (defi->getPredicate())
      mov->setPredicate(defi->cc, defi->getPredicate());

   cst->setSrc(s, mov->getDef(0));
   cst->bb->insertBefore(cst, mov);

   cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help
}

// Insert extra moves so that, if multiple register constraints on a value
// are in conflict, these conflicts can be resolved.
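// A merge/union source without any defs (an undefined value) is given a
// dummy nop def below, e.g. (invented) "nop %u", so that it has a defining
// instruction and a live range for RA to reason about.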
bool
RegAlloc::InsertConstraintsPass::insertConstraintMoves()
{
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      Instruction *cst = *it;
      Instruction *mov;

      if (cst->op == OP_SPLIT && 0) {
         // spilling splits is annoying, just make sure they're separate
         // (note: this branch is currently disabled by the "&& 0" above)
         for (int d = 0; cst->defExists(d); ++d) {
            if (!cst->getDef(d)->refCount())
               continue;
            LValue *lval = new_LValue(func, cst->def(d).getFile());
            const uint8_t size = cst->def(d).getSize();
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setSrc(0, lval);
            mov->setDef(0, cst->getDef(d));
            cst->setDef(d, mov->getSrc(0));
            cst->bb->insertAfter(cst, mov);

            cst->getSrc(0)->asLValue()->noSpill = 1;
            mov->getSrc(0)->asLValue()->noSpill = 1;
         }
      } else
      if (cst->op == OP_MERGE || cst->op == OP_UNION) {
         for (int s = 0; cst->srcExists(s); ++s) {
            const uint8_t size = cst->src(s).getSize();

            if (!cst->getSrc(s)->defs.size()) {
               mov = new_Instruction(func, OP_NOP, typeOfSize(size));
               mov->setDef(0, cst->getSrc(s));
               cst->bb->insertBefore(cst, mov);
               continue;
            }

            insertConstraintMove(cst, s);
         }
      }
   }

   return true;
}

} // namespace nv50_ir