nvc0/ir: move sample id to second source arg to fix sampler2DMS
[mesa.git] / src / gallium / drivers / nouveau / codegen / nv50_ir_lowering_nvc0.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "codegen/nv50_ir.h"
24 #include "codegen/nv50_ir_build_util.h"
25
26 #include "codegen/nv50_ir_target_nvc0.h"
27
28 #include <limits>
29
30 namespace nv50_ir {
31
// Per-lane quad operations for OP_QUADOP, 2 bits each.
#define QOP_ADD 0
#define QOP_SUBR 1
#define QOP_SUB 2
#define QOP_MOV2 3

// Pack one 2-bit op per lane of a quad, in the order:
//     UL UR LL LR
#define QUADOP(q, r, s, t) \
   ((QOP_##q << 6) | (QOP_##r << 4) | \
    (QOP_##s << 2) | (QOP_##t << 0))
41
// Pre-RA legalization pass, run on SSA form: replaces operations the
// hardware cannot execute directly (integer division/modulus, and —
// eventually — double-precision rcp/rsq) with builtin-library calls.
class NVC0LegalizeSSA : public Pass
{
private:
   virtual bool visit(BasicBlock *);
   virtual bool visit(Function *);

   // we want to insert calls to the builtin library only after optimization
   void handleDIV(Instruction *);    // integer division, modulus
   void handleRCPRSQ(Instruction *); // double precision float recip/rsqrt

private:
   BuildUtil bld; // instruction builder, bound to the program in visit(Function)
};
55
56 void
57 NVC0LegalizeSSA::handleDIV(Instruction *i)
58 {
59 FlowInstruction *call;
60 int builtin;
61 Value *def[2];
62
63 bld.setPosition(i, false);
64 def[0] = bld.mkMovToReg(0, i->getSrc(0))->getDef(0);
65 def[1] = bld.mkMovToReg(1, i->getSrc(1))->getDef(0);
66 switch (i->dType) {
67 case TYPE_U32: builtin = NVC0_BUILTIN_DIV_U32; break;
68 case TYPE_S32: builtin = NVC0_BUILTIN_DIV_S32; break;
69 default:
70 return;
71 }
72 call = bld.mkFlow(OP_CALL, NULL, CC_ALWAYS, NULL);
73 bld.mkMov(i->getDef(0), def[(i->op == OP_DIV) ? 0 : 1]);
74 bld.mkClobber(FILE_GPR, (i->op == OP_DIV) ? 0xe : 0xd, 2);
75 bld.mkClobber(FILE_PREDICATE, (i->dType == TYPE_S32) ? 0xf : 0x3, 0);
76
77 call->fixed = 1;
78 call->absolute = call->builtin = 1;
79 call->target.builtin = builtin;
80 delete_Instruction(prog, i);
81 }
82
// Lower double-precision reciprocal / reciprocal square root.
// Not implemented yet: F64 RCP/RSQ currently pass through unchanged
// (see visit(BasicBlock), which only calls this for TYPE_F64).
void
NVC0LegalizeSSA::handleRCPRSQ(Instruction *i)
{
   // TODO
}
88
// Bind the builder to the program once per function; the actual
// per-instruction work happens in visit(BasicBlock).
bool
NVC0LegalizeSSA::visit(Function *fn)
{
   bld.setProgram(fn->getProgram());
   return true;
}
95
96 bool
97 NVC0LegalizeSSA::visit(BasicBlock *bb)
98 {
99 Instruction *next;
100 for (Instruction *i = bb->getEntry(); i; i = next) {
101 next = i->next;
102 if (i->dType == TYPE_F32)
103 continue;
104 switch (i->op) {
105 case OP_DIV:
106 case OP_MOD:
107 handleDIV(i);
108 break;
109 case OP_RCP:
110 case OP_RSQ:
111 if (i->dType == TYPE_F64)
112 handleRCPRSQ(i);
113 break;
114 default:
115 break;
116 }
117 }
118 return true;
119 }
120
// Post-RA legalization: inserts texture barriers (nve4+), replaces zero
// immediates with a dedicated zero register, splits 64-bit operations,
// and cleans up pseudo control flow (CONT->BRA, BRA->JOIN).
class NVC0LegalizePostRA : public Pass
{
public:
   NVC0LegalizePostRA(const Program *);

private:
   virtual bool visit(Function *);
   virtual bool visit(BasicBlock *);

   void replaceZero(Instruction *);
   bool tryReplaceContWithBra(BasicBlock *);
   void propagateJoin(BasicBlock *);

   // One recorded "first use" of a texture result; level is the barrier
   // level protecting it (-1 until computed by insertTextureBarriers).
   struct TexUse
   {
      TexUse(Instruction *use, const Instruction *tex)
         : insn(use), tex(tex), level(-1) { }
      Instruction *insn;
      const Instruction *tex; // or split / mov
      int level;
   };
   // Min/max count of outstanding (unbarriered) TEXes at a block boundary.
   struct Limits
   {
      Limits() { }
      Limits(int min, int max) : min(min), max(max) { }
      int min, max;
   };
   bool insertTextureBarriers(Function *);
   inline bool insnDominatedBy(const Instruction *, const Instruction *) const;
   void findFirstUses(const Instruction *tex, const Instruction *def,
                      std::list<TexUse>&);
   void findOverwritingDefs(const Instruction *tex, Instruction *insn,
                            const BasicBlock *term,
                            std::list<TexUse>&);
   void addTexUse(std::list<TexUse>&, Instruction *, const Instruction *);
   const Instruction *recurseDef(const Instruction *);

private:
   LValue *rZero; // dedicated always-zero register (set up in visit(Function))
   LValue *carry; // $c register used when splitting 64-bit ops
   const bool needTexBar; // texture barriers required on nve4 (Kepler) and up
};
163
// Texture barriers are only needed on chipsets >= 0xe0 (nve4/Kepler);
// rZero and carry are allocated per-function in visit(Function).
NVC0LegalizePostRA::NVC0LegalizePostRA(const Program *prog)
   : rZero(NULL),
     carry(NULL),
     needTexBar(prog->getTarget()->getChipset() >= 0xe0)
{
}
170
171 bool
172 NVC0LegalizePostRA::insnDominatedBy(const Instruction *later,
173 const Instruction *early) const
174 {
175 if (early->bb == later->bb)
176 return early->serial < later->serial;
177 return later->bb->dominatedBy(early->bb);
178 }
179
180 void
181 NVC0LegalizePostRA::addTexUse(std::list<TexUse> &uses,
182 Instruction *usei, const Instruction *insn)
183 {
184 bool add = true;
185 for (std::list<TexUse>::iterator it = uses.begin();
186 it != uses.end();) {
187 if (insnDominatedBy(usei, it->insn)) {
188 add = false;
189 break;
190 }
191 if (insnDominatedBy(it->insn, usei))
192 it = uses.erase(it);
193 else
194 ++it;
195 }
196 if (add)
197 uses.push_back(TexUse(usei, insn));
198 }
199
200 void
201 NVC0LegalizePostRA::findOverwritingDefs(const Instruction *texi,
202 Instruction *insn,
203 const BasicBlock *term,
204 std::list<TexUse> &uses)
205 {
206 while (insn->op == OP_MOV && insn->getDef(0)->equals(insn->getSrc(0)))
207 insn = insn->getSrc(0)->getUniqueInsn();
208
209 if (!insn || !insn->bb->reachableBy(texi->bb, term))
210 return;
211
212 switch (insn->op) {
213 /* Values not connected to the tex's definition through any of these should
214 * not be conflicting.
215 */
216 case OP_SPLIT:
217 case OP_MERGE:
218 case OP_PHI:
219 case OP_UNION:
220 /* recurse again */
221 for (int s = 0; insn->srcExists(s); ++s)
222 findOverwritingDefs(texi, insn->getSrc(s)->getUniqueInsn(), term,
223 uses);
224 break;
225 default:
226 // if (!isTextureOp(insn->op)) // TODO: are TEXes always ordered ?
227 addTexUse(uses, insn, texi);
228 break;
229 }
230 }
231
// Collect the first real (machine-code) uses of the values defined by
// @insn — initially the TEX itself — into @uses. Pseudo operations
// (split/merge/phi/union) and non-final reg-to-reg movs do not exist in
// the emitted code, so they are traversed transitively instead of being
// recorded.
void
NVC0LegalizePostRA::findFirstUses(const Instruction *texi,
                                  const Instruction *insn,
                                  std::list<TexUse> &uses)
{
   for (int d = 0; insn->defExists(d); ++d) {
      Value *v = insn->getDef(d);
      for (Value::UseIterator u = v->uses.begin(); u != v->uses.end(); ++u) {
         Instruction *usei = (*u)->getInsn();

         if (usei->op == OP_PHI || usei->op == OP_UNION) {
            // need a barrier before WAW cases
            // (defs merging into the same value from other sources may
            // overwrite the TEX result's registers)
            for (int s = 0; usei->srcExists(s); ++s) {
               Instruction *defi = usei->getSrc(s)->getUniqueInsn();
               if (defi && &usei->src(s) != *u)
                  findOverwritingDefs(texi, defi, usei->bb, uses);
            }
         }

         if (usei->op == OP_SPLIT ||
             usei->op == OP_MERGE ||
             usei->op == OP_PHI ||
             usei->op == OP_UNION) {
            // these uses don't manifest in the machine code
            findFirstUses(texi, usei, uses);
         } else
         if (usei->op == OP_MOV && usei->getDef(0)->equals(usei->getSrc(0)) &&
             usei->subOp != NV50_IR_SUBOP_MOV_FINAL) {
            // same-register mov that will be dropped: look through it
            findFirstUses(texi, usei, uses);
         } else {
            addTexUse(uses, usei, insn);
         }
      }
   }
}
267
// Texture barriers:
// This pass is a bit long and ugly and can probably be optimized.
//
// 1. obtain a list of TEXes and their outputs' first use(s)
// 2. calculate the barrier level of each first use (minimal number of TEXes,
//    over all paths, between the TEX and the use in question)
// 3. for each barrier, if all paths from the source TEX to that barrier
//    contain a barrier of lesser level, it can be culled
//
// Returns false if the function contains no texture ops (nothing done).
bool
NVC0LegalizePostRA::insertTextureBarriers(Function *fn)
{
   std::list<TexUse> *uses;
   std::vector<Instruction *> texes;     // all TEXes in instruction order
   std::vector<int> bbFirstTex;          // per BB: index into texes of its 1st TEX
   std::vector<int> bbFirstUse;
   std::vector<int> texCounts;           // per BB: number of TEXes (path weights)
   std::vector<TexUse> useVec;
   ArrayList insns;

   // assigns serial numbers used by insnDominatedBy() below
   fn->orderInstructions(insns);

   texCounts.resize(fn->allBBlocks.getSize(), 0);
   bbFirstTex.resize(fn->allBBlocks.getSize(), insns.getSize());
   bbFirstUse.resize(fn->allBBlocks.getSize(), insns.getSize());

   // tag BB CFG nodes by their id for later
   for (ArrayList::Iterator i = fn->allBBlocks.iterator(); !i.end(); i.next()) {
      BasicBlock *bb = reinterpret_cast<BasicBlock *>(i.get());
      if (bb)
         bb->cfg.tag = bb->getId();
   }

   // gather the first uses for each TEX
   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *tex = reinterpret_cast<Instruction *>(insns.get(i));
      if (isTextureOp(tex->op)) {
         texes.push_back(tex);
         if (!texCounts.at(tex->bb->getId()))
            bbFirstTex[tex->bb->getId()] = texes.size() - 1;
         texCounts[tex->bb->getId()]++;
      }
   }
   insns.clear();
   if (texes.empty())
      return false;
   uses = new std::list<TexUse>[texes.size()];
   // NOTE(review): plain operator new throws on failure, so this check is
   // dead code; kept as-is to preserve behavior.
   if (!uses)
      return false;
   for (size_t i = 0; i < texes.size(); ++i)
      findFirstUses(texes[i], texes[i], uses[i]);

   // determine the barrier level at each use
   for (size_t i = 0; i < texes.size(); ++i) {
      for (std::list<TexUse>::iterator u = uses[i].begin(); u != uses[i].end();
           ++u) {
         BasicBlock *tb = texes[i]->bb;
         BasicBlock *ub = u->insn->bb;
         if (tb == ub) {
            // same block: level = number of TEXes issued between TEX and use
            u->level = 0;
            for (size_t j = i + 1; j < texes.size() &&
                    texes[j]->bb == tb && texes[j]->serial < u->insn->serial;
                 ++j)
               u->level++;
         } else {
            // different blocks: minimal TEX count over all CFG paths
            u->level = fn->cfg.findLightestPathWeight(&tb->cfg,
                                                      &ub->cfg, texCounts);
            if (u->level < 0) {
               WARN("Failed to find path TEX -> TEXBAR\n");
               u->level = 0;
               continue;
            }
            // this counted all TEXes in the origin block, correct that
            u->level -= i - bbFirstTex.at(tb->getId()) + 1 /* this TEX */;
            // and did not count the TEXes in the destination block, add those
            for (size_t j = bbFirstTex.at(ub->getId()); j < texes.size() &&
                    texes[j]->bb == ub && texes[j]->serial < u->insn->serial;
                 ++j)
               u->level++;
         }
         assert(u->level >= 0);
         useVec.push_back(*u);
      }
   }
   delete[] uses;
   uses = NULL;

   // insert the barriers
   for (size_t i = 0; i < useVec.size(); ++i) {
      Instruction *prev = useVec[i].insn->prev;
      if (useVec[i].level < 0)
         continue;
      if (prev && prev->op == OP_TEXBAR) {
         // merge into an existing barrier: keep the lowest (strictest) level
         if (prev->subOp > useVec[i].level)
            prev->subOp = useVec[i].level;
         prev->setSrc(prev->srcCount(), useVec[i].tex->getDef(0));
      } else {
         Instruction *bar = new_Instruction(func, OP_TEXBAR, TYPE_NONE);
         bar->fixed = 1;
         bar->subOp = useVec[i].level;
         // make use explicit to ease latency calculation
         bar->setSrc(bar->srcCount(), useVec[i].tex->getDef(0));
         useVec[i].insn->bb->insertBefore(useVec[i].insn, bar);
      }
   }

   // below -O3, skip the global barrier-culling data-flow analysis
   if (fn->getProgram()->optLevel < 3) {
      if (uses)
         delete[] uses;
      return true;
   }

   std::vector<Limits> limitT, limitB, limitS; // entry, exit, single

   limitT.resize(fn->allBBlocks.getSize(), Limits(0, 0));
   limitB.resize(fn->allBBlocks.getSize(), Limits(0, 0));
   limitS.resize(fn->allBBlocks.getSize());

   // cull unneeded barriers (should do that earlier, but for simplicity)
   IteratorRef bi = fn->cfg.iteratorCFG();
   // first calculate min/max outstanding TEXes for each BB
   for (bi->reset(); !bi->end(); bi->next()) {
      Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
      BasicBlock *bb = BasicBlock::get(n);
      int min = 0;
      // max == INT_MAX encodes "no barrier seen in this block"
      int max = std::numeric_limits<int>::max();
      for (Instruction *i = bb->getFirst(); i; i = i->next) {
         if (isTextureOp(i->op)) {
            min++;
            if (max < std::numeric_limits<int>::max())
               max++;
         } else
         if (i->op == OP_TEXBAR) {
            min = MIN2(min, i->subOp);
            max = MIN2(max, i->subOp);
         }
      }
      // limits when looking at an isolated block
      limitS[bb->getId()].min = min;
      limitS[bb->getId()].max = max;
   }
   // propagate the min/max values
   // (iterate up to the loop nesting depth so back-edge values stabilize)
   for (unsigned int l = 0; l <= fn->loopNestingBound; ++l) {
      for (bi->reset(); !bi->end(); bi->next()) {
         Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
         BasicBlock *bb = BasicBlock::get(n);
         const int bbId = bb->getId();
         for (Graph::EdgeIterator ei = n->incident(); !ei.end(); ei.next()) {
            BasicBlock *in = BasicBlock::get(ei.getNode());
            const int inId = in->getId();
            limitT[bbId].min = MAX2(limitT[bbId].min, limitB[inId].min);
            limitT[bbId].max = MAX2(limitT[bbId].max, limitB[inId].max);
         }
         // I just hope this is correct ...
         if (limitS[bbId].max == std::numeric_limits<int>::max()) {
            // no barrier
            limitB[bbId].min = limitT[bbId].min + limitS[bbId].min;
            limitB[bbId].max = limitT[bbId].max + limitS[bbId].min;
         } else {
            // block contained a barrier
            limitB[bbId].min = MIN2(limitS[bbId].max,
                                    limitT[bbId].min + limitS[bbId].min);
            limitB[bbId].max = MIN2(limitS[bbId].max,
                                    limitT[bbId].max + limitS[bbId].min);
         }
      }
   }
   // finally delete unnecessary barriers
   for (bi->reset(); !bi->end(); bi->next()) {
      Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
      BasicBlock *bb = BasicBlock::get(n);
      Instruction *prev = NULL;
      Instruction *next;
      int max = limitT[bb->getId()].max;
      for (Instruction *i = bb->getFirst(); i; i = next) {
         next = i->next;
         if (i->op == OP_TEXBAR) {
            if (i->subOp >= max) {
               // a stricter or equal barrier is already guaranteed on entry
               delete_Instruction(prog, i);
               i = NULL;
            } else {
               max = i->subOp;
               // a preceding barrier made redundant by this stricter one
               if (prev && prev->op == OP_TEXBAR && prev->subOp >= max) {
                  delete_Instruction(prog, prev);
                  prev = NULL;
               }
            }
         } else
         if (isTextureOp(i->op)) {
            max++;
         }
         if (i && !i->isNop())
            prev = i;
      }
   }
   // NOTE(review): uses is always NULL here; guard kept to preserve code.
   if (uses)
      delete[] uses;
   return true;
}
466
// Per-function setup: insert texture barriers where the target needs them,
// then allocate the zero register and the carry flag used by the per-BB pass.
bool
NVC0LegalizePostRA::visit(Function *fn)
{
   if (needTexBar)
      insertTextureBarriers(fn);

   rZero = new_LValue(fn, FILE_GPR);
   carry = new_LValue(fn, FILE_FLAGS);

   // First register index past the allocatable GPR range — presumably the
   // emitter maps this to the hardware zero register; TODO confirm.
   rZero->reg.data.id = prog->getTarget()->getFileSize(FILE_GPR);
   carry->reg.data.id = 0;

   return true;
}
481
482 void
483 NVC0LegalizePostRA::replaceZero(Instruction *i)
484 {
485 for (int s = 0; i->srcExists(s); ++s) {
486 if (s == 2 && i->op == OP_SUCLAMP)
487 continue;
488 ImmediateValue *imm = i->getSrc(s)->asImm();
489 if (imm && imm->reg.data.u64 == 0)
490 i->setSrc(s, rZero);
491 }
492 }
493
// replace CONT with BRA for single unconditional continue
//
// Only applies to a loop header with exactly two incident edges (entry +
// a single back edge) whose back-edge block ends in an unpredicated CONT.
// Returns true if the replacement was done (the PRECONT is removed).
bool
NVC0LegalizePostRA::tryReplaceContWithBra(BasicBlock *bb)
{
   if (bb->cfg.incidentCount() != 2 || bb->getEntry()->op != OP_PRECONT)
      return false;
   // find the back edge among the two incident edges
   Graph::EdgeIterator ei = bb->cfg.incident();
   if (ei.getType() != Graph::Edge::BACK)
      ei.next();
   if (ei.getType() != Graph::Edge::BACK)
      return false;
   BasicBlock *contBB = BasicBlock::get(ei.getNode());

   // the continue must be the block's exit and unconditional
   if (!contBB->getExit() || contBB->getExit()->op != OP_CONT ||
       contBB->getExit()->getPredicate())
      return false;
   contBB->getExit()->op = OP_BRA;
   bb->remove(bb->getEntry()); // delete PRECONT

   // sanity: there must not be a second back edge
   ei.next();
   assert(ei.end() || ei.getType() != Graph::Edge::BACK);
   return true;
}
517
// replace branches to join blocks with join ops
//
// For a block starting with an (unmarked) JOIN, turn every incoming BRA
// into a JOIN and remove the block's own JOIN. Blocks whose JOIN carries
// the 'limit' marker were produced by this very transform and are skipped
// to prevent further propagation.
void
NVC0LegalizePostRA::propagateJoin(BasicBlock *bb)
{
   if (bb->getEntry()->op != OP_JOIN || bb->getEntry()->asFlow()->limit)
      return;
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      BasicBlock *in = BasicBlock::get(ei.getNode());
      Instruction *exit = in->getExit();
      if (!exit) {
         in->insertTail(new FlowInstruction(func, OP_JOIN, bb));
         // there should always be a terminator instruction
         WARN("inserted missing terminator in BB:%i\n", in->getId());
      } else
      if (exit->op == OP_BRA) {
         exit->op = OP_JOIN;
         exit->asFlow()->limit = 1; // must-not-propagate marker
      }
   }
   bb->remove(bb->getEntry());
}
539
// Per-block post-RA cleanup: strip pseudo ops and no-ops, split 64-bit
// operations into 32-bit halves, replace zero immediates, then simplify
// the block's control flow (CONT->BRA or JOIN propagation).
bool
NVC0LegalizePostRA::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   // remove pseudo operations and non-fixed no-ops, split 64 bit operations
   for (i = bb->getFirst(); i; i = next) {
      next = i->next;
      if (i->op == OP_EMIT || i->op == OP_RESTART) {
         if (!i->getDef(0)->refCount())
            i->setDef(0, NULL);
         if (i->src(0).getFile() == FILE_IMMEDIATE)
            i->setSrc(0, rZero); // initial value must be 0
      } else
      if (i->isNop()) {
         bb->remove(i);
      } else {
         // TODO: Move this to before register allocation for operations that
         // need the $c register !
         if (typeSizeof(i->dType) == 8) {
            Instruction *hi;
            hi = BuildUtil::split64BitOpPostRA(func, i, rZero, carry);
            if (hi)
               next = hi; // revisit the high half on the next iteration
         }

         // MOV/PFETCH can encode a zero immediate directly
         if (i->op != OP_MOV && i->op != OP_PFETCH)
            replaceZero(i);
      }
   }
   // block may have become empty after the removals above
   if (!bb->getEntry())
      return true;

   if (!tryReplaceContWithBra(bb))
      propagateJoin(bb);

   return true;
}
578
// Main lowering pass: translates generic IR operations (texture ops,
// system-value reads/writes, atomics, surface ops, exports, ...) into
// forms the nvc0/nve4 hardware supports.
class NVC0LoweringPass : public Pass
{
public:
   NVC0LoweringPass(Program *);

private:
   virtual bool visit(Function *);
   virtual bool visit(BasicBlock *);
   virtual bool visit(Instruction *);

   bool handleRDSV(Instruction *);
   bool handleWRSV(Instruction *);
   bool handleEXPORT(Instruction *);
   bool handleOUT(Instruction *);
   bool handleDIV(Instruction *);
   bool handleMOD(Instruction *);
   bool handleSQRT(Instruction *);
   bool handlePOW(Instruction *);
   bool handleTEX(TexInstruction *);
   bool handleTXD(TexInstruction *);
   bool handleTXQ(TexInstruction *);
   bool handleManualTXD(TexInstruction *);
   bool handleATOM(Instruction *);
   bool handleCasExch(Instruction *, bool needCctl);
   void handleSurfaceOpNVE4(TexInstruction *);

   void checkPredicate(Instruction *);

   void readTessCoord(LValue *dst, int c);

   // helpers loading driver-provided info words from c[] space
   Value *loadResInfo32(Value *ptr, uint32_t off);
   Value *loadMsInfo32(Value *ptr, uint32_t off);
   Value *loadTexHandle(Value *ptr, unsigned int slot);

   void adjustCoordinatesMS(TexInstruction *);
   void processSurfaceCoordsNVE4(TexInstruction *);

private:
   const Target *const targ;

   BuildUtil bld;

   Symbol *gMemBase;
   LValue *gpEmitAddress; // running output offset for geometry shader emission
};
624
// Cache the target and bind the builder to the program up front;
// gMemBase starts out unset.
NVC0LoweringPass::NVC0LoweringPass(Program *prog) : targ(prog->getTarget())
{
   bld.setProgram(prog);
   gMemBase = NULL;
}
630
// Per-function setup. For geometry shaders, initialize the emit-address
// counter to 0 at function entry and move it to $r0 on exit.
bool
NVC0LoweringPass::visit(Function *fn)
{
   if (prog->getType() == Program::TYPE_GEOMETRY) {
      assert(!strncmp(fn->getName(), "MAIN", 4));
      // TODO: when we generate actual functions pass this value along somehow
      bld.setPosition(BasicBlock::get(fn->cfg.getRoot()), false);
      gpEmitAddress = bld.loadImm(NULL, 0)->asLValue();
      if (fn->cfgExit) {
         bld.setPosition(BasicBlock::get(fn->cfgExit)->getExit(), false);
         bld.mkMovToReg(0, gpEmitAddress);
      }
   }
   return true;
}
646
// Per-block hook: nothing block-level to do — all lowering happens in the
// per-instruction visit().
bool
NVC0LoweringPass::visit(BasicBlock *bb)
{
   return true;
}
652
653 inline Value *
654 NVC0LoweringPass::loadTexHandle(Value *ptr, unsigned int slot)
655 {
656 uint8_t b = prog->driver->io.resInfoCBSlot;
657 uint32_t off = prog->driver->io.texBindBase + slot * 4;
658 return bld.
659 mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
660 }
661
// move array source to first slot, convert to u16, add indirections
//
// Lower a texture instruction into the source layout the hardware expects.
// nve4 (GK104+) and nvc0 differ: on nve4 the combined tic/tsc handle is
// either baked into tex.r or loaded from c[] for split bindings, while on
// nvc0 an extra leading source packs array layer and tic/tsc indirections.
// Offsets are packed into an immediate appended as the last source.
bool
NVC0LoweringPass::handleTEX(TexInstruction *i)
{
   const int dim = i->tex.target.getDim() + i->tex.target.isCube();
   const int arg = i->tex.target.getArgCount();
   // index of the array-layer source (MS targets carry the sample id last)
   const int lyr = arg - (i->tex.target.isMS() ? 2 : 1);
   const int chipset = prog->getTarget()->getChipset();

   if (chipset >= NVISA_GK104_CHIPSET) {
      if (i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
         WARN("indirect TEX not implemented\n");
      }
      if (i->tex.r == i->tex.s) {
         // bound handle: bias r by the driver's binding base
         i->tex.r += prog->driver->io.texBindBase / 4;
         i->tex.s = 0; // only a single cX[] value possible here
      } else {
         // split resource/sampler: combine both handles into one register
         Value *hnd = bld.getScratch();
         Value *rHnd = loadTexHandle(NULL, i->tex.r);
         Value *sHnd = loadTexHandle(NULL, i->tex.s);

         // insert the sampler handle's bits into the resource handle word
         bld.mkOp3(OP_INSBF, TYPE_U32, hnd, rHnd, bld.mkImm(0x1400), sHnd);

         i->tex.r = 0; // not used for indirect tex
         i->tex.s = 0;
         i->setIndirectR(hnd);
      }
      if (i->tex.target.isArray()) {
         // layer goes first, converted to u16 (saturated for non-TXF)
         LValue *layer = new_LValue(func, FILE_GPR);
         Value *src = i->getSrc(lyr);
         const int sat = (i->op == OP_TXF) ? 1 : 0;
         DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
         bld.mkCvt(OP_CVT, TYPE_U16, layer, sTy, src)->saturate = sat;
         for (int s = dim; s >= 1; --s)
            i->setSrc(s, i->getSrc(s - 1));
         i->setSrc(0, layer);
      }
   } else
   // (nvc0) generate and move the tsc/tic/array source to the front
   if (i->tex.target.isArray() || i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
      LValue *src = new_LValue(func, FILE_GPR); // 0xttxsaaaa

      Value *arrayIndex = i->tex.target.isArray() ? i->getSrc(lyr) : NULL;
      // shift coordinates up to make room for the packed source at slot 0
      for (int s = dim; s >= 1; --s)
         i->setSrc(s, i->getSrc(s - 1));
      i->setSrc(0, arrayIndex);

      Value *ticRel = i->getIndirectR();
      Value *tscRel = i->getIndirectS();

      if (arrayIndex) {
         int sat = (i->op == OP_TXF) ? 1 : 0;
         DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
         bld.mkCvt(OP_CVT, TYPE_U16, src, sTy, arrayIndex)->saturate = sat;
      } else {
         bld.loadImm(src, 0);
      }

      // insert tic (bits 23:17) and tsc (bits 16:10) indices into the word
      if (ticRel) {
         i->setSrc(i->tex.rIndirectSrc, NULL);
         bld.mkOp3(OP_INSBF, TYPE_U32, src, ticRel, bld.mkImm(0x0917), src);
      }
      if (tscRel) {
         i->setSrc(i->tex.sIndirectSrc, NULL);
         bld.mkOp3(OP_INSBF, TYPE_U32, src, tscRel, bld.mkImm(0x0710), src);
      }

      i->setSrc(0, src);
   }

   // For nvc0, the sample id has to be in the second operand, as the offset
   // does. Right now we don't know how to pass both in, and this case can't
   // happen with OpenGL. On nve0, the sample id is part of the texture
   // coordinate argument.
   assert(chipset >= NVISA_GK104_CHIPSET ||
          !i->tex.useOffsets || !i->tex.target.isMS());

   // offset is last source (lod 1st, dc 2nd)
   if (i->tex.useOffsets) {
      uint32_t value = 0;
      int n, c;
      int s = i->srcCount(0xff, true);
      if (i->srcExists(s)) // move potential predicate out of the way
         i->moveSources(s, 1);
      // pack up to 4 offsets, 4 bits per component, 12 bits per offset
      for (n = 0; n < i->tex.useOffsets; ++n)
         for (c = 0; c < 3; ++c)
            value |= (i->tex.offset[n][c] & 0xf) << (n * 12 + c * 4);
      i->setSrc(s, bld.loadImm(NULL, value));
   }

   if (chipset >= NVISA_GK104_CHIPSET) {
      //
      // If TEX requires more than 4 sources, the 2nd register tuple must be
      // aligned to 4, even if it consists of just a single 4-byte register.
      //
      // XXX HACK: We insert 0 sources to avoid the 5 or 6 regs case.
      //
      int s = i->srcCount(0xff, true);
      if (s > 4 && s < 7) {
         if (i->srcExists(s)) // move potential predicate out of the way
            i->moveSources(s, 7 - s);
         while (s < 7)
            i->setSrc(s++, bld.loadImm(NULL, 0));
      }
   }

   return true;
}
770
// Emulate TXD for cases the hardware instruction cannot handle: run the
// texture fetch once per quad lane, with coordinates reconstructed from
// the lane's position plus the given dPdx/dPdy, inside a QUADON/QUADPOP
// region, then recombine the four per-lane results with OP_UNION.
bool
NVC0LoweringPass::handleManualTXD(TexInstruction *i)
{
   // per-lane quadop pairs: first applies dPdx, second applies dPdy
   static const uint8_t qOps[4][2] =
   {
      { QUADOP(MOV2, ADD,  MOV2, ADD),  QUADOP(MOV2, MOV2, ADD,  ADD)  }, // l0
      { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(MOV2, MOV2, ADD,  ADD)  }, // l1
      { QUADOP(MOV2, ADD,  MOV2, ADD),  QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l2
      { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l3
   };
   Value *def[4][4]; // [def component][lane]
   Value *crd[3];
   Instruction *tex;
   Value *zero = bld.loadImm(bld.getSSA(), 0);
   int l, c;
   const int dim = i->tex.target.getDim();

   i->op = OP_TEX; // no need to clone dPdx/dPdy later

   for (c = 0; c < dim; ++c)
      crd[c] = bld.getScratch();

   bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
   for (l = 0; l < 4; ++l) {
      // mov coordinates from lane l to all lanes
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(0x00, crd[c], l, i->getSrc(c), zero);
      // add dPdx from lane l to lanes dx
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][0], crd[c], l, i->dPdx[c].get(), crd[c]);
      // add dPdy from lane l to lanes dy
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][1], crd[c], l, i->dPdy[c].get(), crd[c]);
      // texture
      bld.insert(tex = cloneForward(func, i));
      for (c = 0; c < dim; ++c)
         tex->setSrc(c, crd[c]);
      // save results
      for (c = 0; i->defExists(c); ++c) {
         Instruction *mov;
         def[c][l] = bld.getSSA();
         mov = bld.mkMov(def[c][l], tex->getDef(c));
         mov->fixed = 1;
         mov->lanes = 1 << l; // only lane l keeps this result
      }
   }
   bld.mkOp(OP_QUADPOP, TYPE_NONE, NULL);

   // merge the per-lane results back into the original defs
   for (c = 0; i->defExists(c); ++c) {
      Instruction *u = bld.mkOp(OP_UNION, TYPE_U32, i->getDef(c));
      for (l = 0; l < 4; ++l)
         u->setSrc(l, def[c][l]);
   }

   i->bb->remove(i);
   return true;
}
828
// Lower TXD: append the derivatives as extra sources after the regular
// arguments. Falls back to the manual quad-based emulation when the
// target/arg combination exceeds what the hardware TXD supports.
bool
NVC0LoweringPass::handleTXD(TexInstruction *txd)
{
   int dim = txd->tex.target.getDim();
   int arg = txd->tex.target.getArgCount();

   handleTEX(txd);
   // skip past any sources handleTEX() may have added
   while (txd->srcExists(arg))
      ++arg;

   txd->tex.derivAll = true;
   if (dim > 2 ||
       txd->tex.target.isCube() ||
       arg > 4 ||
       txd->tex.target.isShadow())
      return handleManualTXD(txd);

   // interleave dPdx/dPdy per coordinate after the regular sources
   for (int c = 0; c < dim; ++c) {
      txd->setSrc(arg + c * 2 + 0, txd->dPdx[c]);
      txd->setSrc(arg + c * 2 + 1, txd->dPdy[c]);
      txd->dPdx[c].set(NULL);
      txd->dPdy[c].set(NULL);
   }
   return true;
}
854
// Lower a texture query instruction. Currently a no-op pass-through.
bool
NVC0LoweringPass::handleTXQ(TexInstruction *txq)
{
   // TODO: indirect resource/sampler index
   return true;
}
861
// Lower atomics on local/shared memory: rewrite them as global-memory
// atomics based at the SV_LBASE/SV_SBASE system value (plus any existing
// indirect pointer). Global-memory atomics pass through unchanged.
bool
NVC0LoweringPass::handleATOM(Instruction *atom)
{
   SVSemantic sv;

   switch (atom->src(0).getFile()) {
   case FILE_MEMORY_LOCAL:
      sv = SV_LBASE;
      break;
   case FILE_MEMORY_SHARED:
      sv = SV_SBASE;
      break;
   default:
      assert(atom->src(0).getFile() == FILE_MEMORY_GLOBAL);
      return true;
   }
   Value *base =
      bld.mkOp1v(OP_RDSV, TYPE_U32, bld.getScratch(), bld.mkSysVal(sv, 0));
   Value *ptr = atom->getIndirect(0, 0);

   // clone the symbol before retargeting it so other users are unaffected
   atom->setSrc(0, cloneShallow(func, atom->getSrc(0)));
   atom->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
   if (ptr)
      base = bld.mkOp2v(OP_ADD, TYPE_U32, base, base, ptr);
   atom->setIndirect(0, 0, base);

   return true;
}
890
// Fix up compare-and-swap / exchange atomics: optionally invalidate the
// cache line first (needCctl), and merge the CAS compare/swap values into
// the 64-bit register pair the hardware expects.
// Returns false if the instruction is neither CAS nor EXCH.
bool
NVC0LoweringPass::handleCasExch(Instruction *cas, bool needCctl)
{
   if (cas->subOp != NV50_IR_SUBOP_ATOM_CAS &&
       cas->subOp != NV50_IR_SUBOP_ATOM_EXCH)
      return false;
   bld.setPosition(cas, true);

   if (needCctl) {
      Instruction *cctl = bld.mkOp1(OP_CCTL, TYPE_NONE, NULL, cas->getSrc(0));
      cctl->setIndirect(0, 0, cas->getIndirect(0, 0));
      cctl->fixed = 1;
      cctl->subOp = NV50_IR_SUBOP_CCTL_IV;
      // mirror the atomic's predication so the cctl fires iff the CAS does
      if (cas->isPredicated())
         cctl->setPredicate(cas->cc, cas->getPredicate());
   }

   if (cas->defExists(0) && cas->subOp == NV50_IR_SUBOP_ATOM_CAS) {
      // CAS is crazy. Its 2nd source is a double reg, and the 3rd source
      // should be set to the high part of the double reg or bad things will
      // happen elsewhere in the universe.
      // Also, it sometimes returns the new value instead of the old one
      // under mysterious circumstances.
      Value *dreg = bld.getSSA(8);
      bld.setPosition(cas, false);
      bld.mkOp2(OP_MERGE, TYPE_U64, dreg, cas->getSrc(1), cas->getSrc(2));
      cas->setSrc(1, dreg);
   }

   return true;
}
922
923 inline Value *
924 NVC0LoweringPass::loadResInfo32(Value *ptr, uint32_t off)
925 {
926 uint8_t b = prog->driver->io.resInfoCBSlot;
927 off += prog->driver->io.suInfoBase;
928 return bld.
929 mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
930 }
931
932 inline Value *
933 NVC0LoweringPass::loadMsInfo32(Value *ptr, uint32_t off)
934 {
935 uint8_t b = prog->driver->io.msInfoCBSlot;
936 off += prog->driver->io.msInfoBase;
937 return bld.
938 mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
939 }
940
/* On nvc0, surface info is obtained via the surface binding points passed
 * to the SULD/SUST instructions.
 * On nve4, surface info is stored in c[] and is used by various special
 * instructions, e.g. for clamping coordinates or generating an address.
 * They couldn't just have added an equivalent to TIC now, couldn't they ?
 */
// Byte offsets of the per-surface info fields within one c[] record:
#define NVE4_SU_INFO_ADDR 0x00
#define NVE4_SU_INFO_FMT 0x04
#define NVE4_SU_INFO_DIM_X 0x08
#define NVE4_SU_INFO_PITCH 0x0c
#define NVE4_SU_INFO_DIM_Y 0x10
#define NVE4_SU_INFO_ARRAY 0x14
#define NVE4_SU_INFO_DIM_Z 0x18
#define NVE4_SU_INFO_UNK1C 0x1c
#define NVE4_SU_INFO_WIDTH 0x20
#define NVE4_SU_INFO_HEIGHT 0x24
#define NVE4_SU_INFO_DEPTH 0x28
#define NVE4_SU_INFO_TARGET 0x2c
#define NVE4_SU_INFO_CALL 0x30
#define NVE4_SU_INFO_RAW_X 0x34
#define NVE4_SU_INFO_MS_X 0x38
#define NVE4_SU_INFO_MS_Y 0x3c

// size of one surface's info record
#define NVE4_SU_INFO__STRIDE 0x40

// indexed accessors for the per-dimension fields above
#define NVE4_SU_INFO_DIM(i) (0x08 + (i) * 8)
#define NVE4_SU_INFO_SIZE(i) (0x20 + (i) * 4)
#define NVE4_SU_INFO_MS(i) (0x38 + (i) * 4)
969
970 static inline uint16_t getSuClampSubOp(const TexInstruction *su, int c)
971 {
972 switch (su->tex.target.getEnum()) {
973 case TEX_TARGET_BUFFER: return NV50_IR_SUBOP_SUCLAMP_PL(0, 1);
974 case TEX_TARGET_RECT: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
975 case TEX_TARGET_1D: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
976 case TEX_TARGET_1D_ARRAY: return (c == 1) ?
977 NV50_IR_SUBOP_SUCLAMP_PL(0, 2) :
978 NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
979 case TEX_TARGET_2D: return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
980 case TEX_TARGET_2D_MS: return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
981 case TEX_TARGET_2D_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
982 case TEX_TARGET_2D_MS_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
983 case TEX_TARGET_3D: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
984 case TEX_TARGET_CUBE: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
985 case TEX_TARGET_CUBE_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
986 default:
987 assert(0);
988 return 0;
989 }
990 }
991
// Rewrite a multisampled surface access as a non-MS one: scale x/y by the
// per-surface MS log2 factors from c[], add the sample's x/y offset from
// the MS info table, and drop the sample-id source. Non-MS targets are
// left untouched.
void
NVC0LoweringPass::adjustCoordinatesMS(TexInstruction *tex)
{
   const uint16_t base = tex->tex.r * NVE4_SU_INFO__STRIDE;
   const int arg = tex->tex.target.getArgCount();

   if (tex->tex.target == TEX_TARGET_2D_MS)
      tex->tex.target = TEX_TARGET_2D;
   else
   if (tex->tex.target == TEX_TARGET_2D_MS_ARRAY)
      tex->tex.target = TEX_TARGET_2D_ARRAY;
   else
      return;

   Value *x = tex->getSrc(0);
   Value *y = tex->getSrc(1);
   Value *s = tex->getSrc(arg - 1); // sample id is the last argument

   Value *tx = bld.getSSA(), *ty = bld.getSSA(), *ts = bld.getSSA();

   // MS_X/MS_Y hold shift amounts, presumably log2 of the sample grid —
   // TODO confirm against the driver's upload code.
   Value *ms_x = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(0));
   Value *ms_y = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(1));

   bld.mkOp2(OP_SHL, TYPE_U32, tx, x, ms_x);
   bld.mkOp2(OP_SHL, TYPE_U32, ty, y, ms_y);

   // index the MS info table: (s & 7) * 8 bytes per entry
   s = bld.mkOp2v(OP_AND, TYPE_U32, ts, s, bld.loadImm(NULL, 0x7));
   s = bld.mkOp2v(OP_SHL, TYPE_U32, ts, ts, bld.mkImm(3));

   Value *dx = loadMsInfo32(ts, 0x0);
   Value *dy = loadMsInfo32(ts, 0x4);

   bld.mkOp2(OP_ADD, TYPE_U32, tx, tx, dx);
   bld.mkOp2(OP_ADD, TYPE_U32, ty, ty, dy);

   tex->setSrc(0, tx);
   tex->setSrc(1, ty);
   tex->moveSources(arg, -1); // drop the sample-id slot
}
1031
// Sets 64-bit "generic address", predicate and format sources for SULD/SUST.
// They're computed from the coordinates using the surface info in c[] space.
//
// On exit the surface op's sources are rewritten to:
//   src(0) = 64-bit address, src(1) = format info (0 for raw), src(2) = the
//   out-of-bounds predicate produced by the clamp/offset instructions.
void
NVC0LoweringPass::processSurfaceCoordsNVE4(TexInstruction *su)
{
   Instruction *insn;
   // atomic reductions and "raw" (unformatted byte) accesses take slightly
   // different paths below
   const bool atom = su->op == OP_SUREDB || su->op == OP_SUREDP;
   const bool raw =
      su->op == OP_SULDB || su->op == OP_SUSTB || su->op == OP_SUREDB;
   const int idx = su->tex.r;
   const int dim = su->tex.target.getDim();
   const int arg = dim + (su->tex.target.isArray() ? 1 : 0);
   const uint16_t base = idx * NVE4_SU_INFO__STRIDE;
   int c;
   Value *zero = bld.mkImm(0);
   Value *p1 = NULL; // out-of-bounds predicate for the array layer, if any
   Value *v;
   Value *src[3];
   Value *bf, *eau, *off; // byte offset in line, upper address, pixel offset
   Value *addr, *pred;

   off = bld.getScratch(4);
   bf = bld.getScratch(4);
   addr = bld.getSSA(8);
   pred = bld.getScratch(1, FILE_PREDICATE);

   bld.setPosition(su, false);

   adjustCoordinatesMS(su);

   // calculate clamped coordinates
   for (c = 0; c < arg; ++c) {
      src[c] = bld.getScratch();
      if (c == 0 && raw)
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_RAW_X);
      else
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_DIM(c));
      bld.mkOp3(OP_SUCLAMP, TYPE_S32, src[c], su->getSrc(c), v, zero)
         ->subOp = getSuClampSubOp(su, c);
   }
   // unused coordinate slots are zero
   for (; c < 3; ++c)
      src[c] = zero;

   // set predicate output
   if (su->tex.target == TEX_TARGET_BUFFER) {
      src[0]->getInsn()->setFlagsDef(1, pred);
   } else
   if (su->tex.target.isArray()) {
      // the layer clamp gets its own predicate, OR'd into pred further down
      p1 = bld.getSSA(1, FILE_PREDICATE);
      src[dim]->getInsn()->setFlagsDef(1, p1);
   }

   // calculate pixel offset
   if (dim == 1) {
      if (su->tex.target != TEX_TARGET_BUFFER)
         bld.mkOp2(OP_AND, TYPE_U32, off, src[0], bld.loadImm(NULL, 0xffff));
   } else
   if (dim == 3) {
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, src[2], v, src[1])
         ->subOp = NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l

      v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, off, v, src[0])
         ->subOp = NV50_IR_SUBOP_MADSP(0,2,8); // u32 u16l u16l
   } else {
      assert(dim == 2);
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, src[1], v, src[0])
         ->subOp = su->tex.target.isArray() ?
         NV50_IR_SUBOP_MADSP_SD : NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l
   }

   // calculate effective address part 1
   if (su->tex.target == TEX_TARGET_BUFFER) {
      if (raw) {
         bf = src[0];
      } else {
         // scale the element index by the format's byte size
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);
         bld.mkOp3(OP_VSHL, TYPE_U32, bf, src[0], v, zero)
            ->subOp = NV50_IR_SUBOP_V1(7,6,8|2);
      }
   } else {
      Value *y = src[1];
      Value *z = src[2];
      uint16_t subOp = 0;

      switch (dim) {
      case 1:
         y = zero;
         z = zero;
         break;
      case 2:
         z = off;
         if (!su->tex.target.isArray()) {
            // non-array 2D uses the 3D sub-format path with extra info
            z = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
            subOp = NV50_IR_SUBOP_SUBFM_3D;
         }
         break;
      default:
         subOp = NV50_IR_SUBOP_SUBFM_3D;
         assert(dim == 3);
         break;
      }
      insn = bld.mkOp3(OP_SUBFM, TYPE_U32, bf, src[0], y, z);
      insn->subOp = subOp;
      insn->setFlagsDef(1, pred);
   }

   // part 2
   v = loadResInfo32(NULL, base + NVE4_SU_INFO_ADDR);

   if (su->tex.target == TEX_TARGET_BUFFER) {
      eau = v;
   } else {
      eau = bld.mkOp3v(OP_SUEAU, TYPE_U32, bld.getScratch(4), off, bf, v);
   }
   // add array layer offset
   if (su->tex.target.isArray()) {
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_ARRAY);
      if (dim == 1)
         bld.mkOp3(OP_MADSP, TYPE_U32, eau, src[1], v, eau)
            ->subOp = NV50_IR_SUBOP_MADSP(4,0,0); // u16 u24 u32
      else
         bld.mkOp3(OP_MADSP, TYPE_U32, eau, v, src[2], eau)
            ->subOp = NV50_IR_SUBOP_MADSP(0,0,0); // u32 u24 u32
      // combine predicates
      assert(p1);
      bld.mkOp2(OP_OR, TYPE_U8, pred, pred, p1);
   }

   if (atom) {
      Value *lo = bf;
      if (su->tex.target == TEX_TARGET_BUFFER) {
         lo = zero;
         bld.mkMov(off, bf);
      }
      // bf == g[] address & 0xff
      // eau == g[] address >> 8
      bld.mkOp3(OP_PERMT, TYPE_U32, bf, lo, bld.loadImm(NULL, 0x6540), eau);
      bld.mkOp3(OP_PERMT, TYPE_U32, eau, zero, bld.loadImm(NULL, 0x0007), eau);
   } else
   if (su->op == OP_SULDP && su->tex.target == TEX_TARGET_BUFFER) {
      // Convert from u32 to u8 address format, which is what the library code
      // doing SULDP currently uses.
      // XXX: can SUEAU do this ?
      // XXX: does it matter that we don't mask high bytes in bf ?
      // Grrr.
      bld.mkOp2(OP_SHR, TYPE_U32, off, bf, bld.mkImm(8));
      bld.mkOp2(OP_ADD, TYPE_U32, eau, eau, off);
   }

   bld.mkOp2(OP_MERGE, TYPE_U64, addr, bf, eau);

   if (atom && su->tex.target == TEX_TARGET_BUFFER)
      bld.mkOp2(OP_ADD, TYPE_U64, addr, addr, off);

   // let's just set it 0 for raw access and hope it works
   v = raw ?
      bld.mkImm(0) : loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);

   // get rid of old coordinate sources, make space for fmt info and predicate
   su->moveSources(arg, 3 - arg);
   // set 64 bit address and 32-bit format sources
   su->setSrc(0, addr);
   su->setSrc(1, v);
   su->setSrc(2, pred);
}
1200
1201 void
1202 NVC0LoweringPass::handleSurfaceOpNVE4(TexInstruction *su)
1203 {
1204 processSurfaceCoordsNVE4(su);
1205
1206 // Who do we hate more ? The person who decided that nvc0's SULD doesn't
1207 // have to support conversion or the person who decided that, in OpenCL,
1208 // you don't have to specify the format here like you do in OpenGL ?
1209
1210 if (su->op == OP_SULDP) {
1211 // We don't patch shaders. Ever.
1212 // You get an indirect call to our library blob here.
1213 // But at least it's uniform.
1214 FlowInstruction *call;
1215 LValue *p[3];
1216 LValue *r[5];
1217 uint16_t base = su->tex.r * NVE4_SU_INFO__STRIDE + NVE4_SU_INFO_CALL;
1218
1219 for (int i = 0; i < 4; ++i)
1220 (r[i] = bld.getScratch(4, FILE_GPR))->reg.data.id = i;
1221 for (int i = 0; i < 3; ++i)
1222 (p[i] = bld.getScratch(1, FILE_PREDICATE))->reg.data.id = i;
1223 (r[4] = bld.getScratch(8, FILE_GPR))->reg.data.id = 4;
1224
1225 bld.mkMov(p[1], bld.mkImm((su->cache == CACHE_CA) ? 1 : 0), TYPE_U8);
1226 bld.mkMov(p[2], bld.mkImm((su->cache == CACHE_CG) ? 1 : 0), TYPE_U8);
1227 bld.mkMov(p[0], su->getSrc(2), TYPE_U8);
1228 bld.mkMov(r[4], su->getSrc(0), TYPE_U64);
1229 bld.mkMov(r[2], su->getSrc(1), TYPE_U32);
1230
1231 call = bld.mkFlow(OP_CALL, NULL, su->cc, su->getPredicate());
1232
1233 call->indirect = 1;
1234 call->absolute = 1;
1235 call->setSrc(0, bld.mkSymbol(FILE_MEMORY_CONST,
1236 prog->driver->io.resInfoCBSlot, TYPE_U32,
1237 prog->driver->io.suInfoBase + base));
1238 call->setSrc(1, r[2]);
1239 call->setSrc(2, r[4]);
1240 for (int i = 0; i < 3; ++i)
1241 call->setSrc(3 + i, p[i]);
1242 for (int i = 0; i < 4; ++i) {
1243 call->setDef(i, r[i]);
1244 bld.mkMov(su->getDef(i), r[i]);
1245 }
1246 call->setDef(4, p[1]);
1247 delete_Instruction(bld.getProgram(), su);
1248 }
1249
1250 if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
1251 // FIXME: for out of bounds access, destination value will be undefined !
1252 Value *pred = su->getSrc(2);
1253 CondCode cc = CC_NOT_P;
1254 if (su->getPredicate()) {
1255 pred = bld.getScratch(1, FILE_PREDICATE);
1256 cc = su->cc;
1257 if (cc == CC_NOT_P) {
1258 bld.mkOp2(OP_OR, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
1259 } else {
1260 bld.mkOp2(OP_AND, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
1261 pred->getInsn()->src(1).mod = Modifier(NV50_IR_MOD_NOT);
1262 }
1263 }
1264 Instruction *red = bld.mkOp(OP_ATOM, su->dType, su->getDef(0));
1265 red->subOp = su->subOp;
1266 if (!gMemBase)
1267 gMemBase = bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0);
1268 red->setSrc(0, gMemBase);
1269 red->setSrc(1, su->getSrc(3));
1270 if (su->subOp == NV50_IR_SUBOP_ATOM_CAS)
1271 red->setSrc(2, su->getSrc(4));
1272 red->setIndirect(0, 0, su->getSrc(0));
1273 red->setPredicate(cc, pred);
1274 delete_Instruction(bld.getProgram(), su);
1275 handleCasExch(red, true);
1276 } else {
1277 su->sType = (su->tex.target == TEX_TARGET_BUFFER) ? TYPE_U32 : TYPE_U8;
1278 }
1279 }
1280
1281 bool
1282 NVC0LoweringPass::handleWRSV(Instruction *i)
1283 {
1284 Instruction *st;
1285 Symbol *sym;
1286 uint32_t addr;
1287
1288 // must replace, $sreg are not writeable
1289 addr = targ->getSVAddress(FILE_SHADER_OUTPUT, i->getSrc(0)->asSym());
1290 if (addr >= 0x400)
1291 return false;
1292 sym = bld.mkSymbol(FILE_SHADER_OUTPUT, 0, i->sType, addr);
1293
1294 st = bld.mkStore(OP_EXPORT, i->dType, sym, i->getIndirect(0, 0),
1295 i->getSrc(1));
1296 st->perPatch = i->perPatch;
1297
1298 bld.getBB()->remove(i);
1299 return true;
1300 }
1301
1302 void
1303 NVC0LoweringPass::readTessCoord(LValue *dst, int c)
1304 {
1305 Value *laneid = bld.getSSA();
1306 Value *x, *y;
1307
1308 bld.mkOp1(OP_RDSV, TYPE_U32, laneid, bld.mkSysVal(SV_LANEID, 0));
1309
1310 if (c == 0) {
1311 x = dst;
1312 y = NULL;
1313 } else
1314 if (c == 1) {
1315 x = NULL;
1316 y = dst;
1317 } else {
1318 assert(c == 2);
1319 x = bld.getSSA();
1320 y = bld.getSSA();
1321 }
1322 if (x)
1323 bld.mkFetch(x, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f0, NULL, laneid);
1324 if (y)
1325 bld.mkFetch(y, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f4, NULL, laneid);
1326
1327 if (c == 2) {
1328 bld.mkOp2(OP_ADD, TYPE_F32, dst, x, y);
1329 bld.mkOp2(OP_SUB, TYPE_F32, dst, bld.loadImm(NULL, 1.0f), dst);
1330 }
1331 }
1332
// Lower a system-value read into whatever the hardware provides:
// an interpolant (POSITION/FACE), lane-indexed fetches (TESS_COORD),
// a c[] load (grid info on GK104+), a generic input fetch, or — for
// values with no input address — leave it to be emitted as "mov $sreg".
bool
NVC0LoweringPass::handleRDSV(Instruction *i)
{
   Symbol *sym = i->getSrc(0)->asSym();
   const SVSemantic sv = sym->reg.data.sv.sv;
   Value *vtx = NULL;
   Instruction *ld;
   uint32_t addr = targ->getSVAddress(FILE_SHADER_INPUT, sym);

   if (addr >= 0x400) {
      // mov $sreg — no input slot; the RDSV stays and is kept as-is,
      // except for the 4th vector component which has a constant value
      if (sym->reg.data.sv.index == 3) {
         // TGSI backend may use 4th component of TID,NTID,CTAID,NCTAID
         i->op = OP_MOV;
         i->setSrc(0, bld.mkImm((sv == SV_NTID || sv == SV_NCTAID) ? 1 : 0));
      }
      return true;
   }

   switch (sv) {
   case SV_POSITION:
      assert(prog->getType() == Program::TYPE_FRAGMENT);
      bld.mkInterp(NV50_IR_INTERP_LINEAR, i->getDef(0), addr, NULL);
      break;
   case SV_FACE:
   {
      Value *face = i->getDef(0);
      bld.mkInterp(NV50_IR_INTERP_FLAT, face, addr, NULL);
      if (i->dType == TYPE_F32) {
         // map the sign bit to +1.0f / -1.0f:
         // keep only the sign, then XOR with the bit pattern of -1.0f
         bld.mkOp2(OP_AND, TYPE_U32, face, face, bld.mkImm(0x80000000));
         bld.mkOp2(OP_XOR, TYPE_U32, face, face, bld.mkImm(0xbf800000));
      }
   }
      break;
   case SV_TESS_COORD:
      assert(prog->getType() == Program::TYPE_TESSELLATION_EVAL);
      readTessCoord(i->getDef(0)->asLValue(), i->getSrc(0)->reg.data.sv.index);
      break;
   case SV_NTID:
   case SV_NCTAID:
   case SV_GRIDID:
      assert(targ->getChipset() >= NVISA_GK104_CHIPSET); // mov $sreg otherwise
      if (sym->reg.data.sv.index == 3) {
         // 4th component is a constant: 1 for NTID/NCTAID, 0 for GRIDID
         i->op = OP_MOV;
         i->setSrc(0, bld.mkImm(sv == SV_GRIDID ? 0 : 1));
         return true; // instruction kept as the MOV, don't remove it
      }
      // grid info lives in the driver-provided constant buffer
      addr += prog->driver->prop.cp.gridInfoBase;
      bld.mkLoad(TYPE_U32, i->getDef(0),
                 bld.mkSymbol(FILE_MEMORY_CONST, 0, TYPE_U32, addr), NULL);
      break;
   default:
      if (prog->getType() == Program::TYPE_TESSELLATION_EVAL)
         vtx = bld.mkOp1v(OP_PFETCH, TYPE_U32, bld.getSSA(), bld.mkImm(0));
      ld = bld.mkFetch(i->getDef(0), i->dType,
                       FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
      ld->perPatch = i->perPatch;
      break;
   }
   // the RDSV has been replaced by the lowered sequence
   bld.getBB()->remove(i);
   return true;
}
1395
1396 bool
1397 NVC0LoweringPass::handleDIV(Instruction *i)
1398 {
1399 if (!isFloatType(i->dType))
1400 return true;
1401 bld.setPosition(i, false);
1402 Instruction *rcp = bld.mkOp1(OP_RCP, i->dType, bld.getSSA(), i->getSrc(1));
1403 i->op = OP_MUL;
1404 i->setSrc(1, rcp->getDef(0));
1405 return true;
1406 }
1407
1408 bool
1409 NVC0LoweringPass::handleMOD(Instruction *i)
1410 {
1411 if (i->dType != TYPE_F32)
1412 return true;
1413 LValue *value = bld.getScratch();
1414 bld.mkOp1(OP_RCP, TYPE_F32, value, i->getSrc(1));
1415 bld.mkOp2(OP_MUL, TYPE_F32, value, i->getSrc(0), value);
1416 bld.mkOp1(OP_TRUNC, TYPE_F32, value, value);
1417 bld.mkOp2(OP_MUL, TYPE_F32, value, i->getSrc(1), value);
1418 i->op = OP_SUB;
1419 i->setSrc(1, value);
1420 return true;
1421 }
1422
1423 bool
1424 NVC0LoweringPass::handleSQRT(Instruction *i)
1425 {
1426 Instruction *rsq = bld.mkOp1(OP_RSQ, TYPE_F32,
1427 bld.getSSA(), i->getSrc(0));
1428 i->op = OP_MUL;
1429 i->setSrc(1, rsq->getDef(0));
1430
1431 return true;
1432 }
1433
1434 bool
1435 NVC0LoweringPass::handlePOW(Instruction *i)
1436 {
1437 LValue *val = bld.getScratch();
1438
1439 bld.mkOp1(OP_LG2, TYPE_F32, val, i->getSrc(0));
1440 bld.mkOp2(OP_MUL, TYPE_F32, val, i->getSrc(1), val)->dnz = 1;
1441 bld.mkOp1(OP_PREEX2, TYPE_F32, val, val);
1442
1443 i->op = OP_EX2;
1444 i->setSrc(0, val);
1445 i->setSrc(1, NULL);
1446
1447 return true;
1448 }
1449
1450 bool
1451 NVC0LoweringPass::handleEXPORT(Instruction *i)
1452 {
1453 if (prog->getType() == Program::TYPE_FRAGMENT) {
1454 int id = i->getSrc(0)->reg.data.offset / 4;
1455
1456 if (i->src(0).isIndirect(0)) // TODO, ugly
1457 return false;
1458 i->op = OP_MOV;
1459 i->subOp = NV50_IR_SUBOP_MOV_FINAL;
1460 i->src(0).set(i->src(1));
1461 i->setSrc(1, NULL);
1462 i->setDef(0, new_LValue(func, FILE_GPR));
1463 i->getDef(0)->reg.data.id = id;
1464
1465 prog->maxGPR = MAX2(prog->maxGPR, id);
1466 } else
1467 if (prog->getType() == Program::TYPE_GEOMETRY) {
1468 i->setIndirect(0, 1, gpEmitAddress);
1469 }
1470 return true;
1471 }
1472
1473 bool
1474 NVC0LoweringPass::handleOUT(Instruction *i)
1475 {
1476 if (i->op == OP_RESTART && i->prev && i->prev->op == OP_EMIT) {
1477 i->prev->subOp = NV50_IR_SUBOP_EMIT_RESTART;
1478 delete_Instruction(prog, i);
1479 } else {
1480 assert(gpEmitAddress);
1481 i->setDef(0, gpEmitAddress);
1482 if (i->srcExists(0))
1483 i->setSrc(1, i->getSrc(0));
1484 i->setSrc(0, gpEmitAddress);
1485 }
1486 return true;
1487 }
1488
1489 // Generate a binary predicate if an instruction is predicated by
1490 // e.g. an f32 value.
1491 void
1492 NVC0LoweringPass::checkPredicate(Instruction *insn)
1493 {
1494 Value *pred = insn->getPredicate();
1495 Value *pdst;
1496
1497 if (!pred || pred->reg.file == FILE_PREDICATE)
1498 return;
1499 pdst = new_LValue(func, FILE_PREDICATE);
1500
1501 // CAUTION: don't use pdst->getInsn, the definition might not be unique,
1502 // delay turning PSET(FSET(x,y),0) into PSET(x,y) to a later pass
1503
1504 bld.mkCmp(OP_SET, CC_NEU, insn->dType, pdst, insn->dType, bld.mkImm(0), pred);
1505
1506 insn->setPredicate(insn->cc, pdst);
1507 }
1508
//
// - add quadop dance for texturing
// - put FP outputs in GPRs
// - convert instruction sequences
//
bool
NVC0LoweringPass::visit(Instruction *i)
{
   bld.setPosition(i, false);

   // predicates that aren't boolean get converted to real predicate regs
   if (i->cc != CC_ALWAYS)
      checkPredicate(i);

   switch (i->op) {
   case OP_TEX:
   case OP_TXB:
   case OP_TXL:
   case OP_TXF:
   case OP_TXG:
      return handleTEX(i->asTex());
   case OP_TXD:
      return handleTXD(i->asTex());
   case OP_TXQ:
      return handleTXQ(i->asTex());
   case OP_EX2:
      // EX2 needs its operand pre-transformed by PREEX2
      bld.mkOp1(OP_PREEX2, TYPE_F32, i->getDef(0), i->getSrc(0));
      i->setSrc(0, i->getDef(0));
      break;
   case OP_POW:
      return handlePOW(i);
   case OP_DIV:
      return handleDIV(i);
   case OP_MOD:
      return handleMOD(i);
   case OP_SQRT:
      return handleSQRT(i);
   case OP_EXPORT:
      return handleEXPORT(i);
   case OP_EMIT:
   case OP_RESTART:
      return handleOUT(i);
   case OP_RDSV:
      return handleRDSV(i);
   case OP_WRSV:
      return handleWRSV(i);
   case OP_LOAD:
      if (i->src(0).getFile() == FILE_SHADER_INPUT) {
         if (prog->getType() == Program::TYPE_COMPUTE) {
            // compute shader "inputs" are really constant buffer 0
            i->getSrc(0)->reg.file = FILE_MEMORY_CONST;
            i->getSrc(0)->reg.fileIndex = 0;
         } else
         if (prog->getType() == Program::TYPE_GEOMETRY &&
             i->src(0).isIndirect(0)) {
            // XXX: this assumes vec4 units
            Value *ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
                                    i->getIndirect(0, 0), bld.mkImm(4));
            i->setIndirect(0, 0, ptr);
         } else {
            // other stages read attributes with VFETCH
            i->op = OP_VFETCH;
            assert(prog->getType() != Program::TYPE_FRAGMENT); // INTERP
         }
      }
      break;
   case OP_ATOM:
   {
      // cctl (cache control) is only needed for global memory atomics
      const bool cctl = i->src(0).getFile() == FILE_MEMORY_GLOBAL;
      handleATOM(i);
      handleCasExch(i, cctl);
   }
      break;
   case OP_SULDB:
   case OP_SULDP:
   case OP_SUSTB:
   case OP_SUSTP:
   case OP_SUREDB:
   case OP_SUREDP:
      // surface ops are only lowered here for Kepler+; earlier chips
      // presumably take a different path — confirm against target code
      if (targ->getChipset() >= NVISA_GK104_CHIPSET)
         handleSurfaceOpNVE4(i->asTex());
      break;
   default:
      break;
   }
   return true;
}
1593
1594 bool
1595 TargetNVC0::runLegalizePass(Program *prog, CGStage stage) const
1596 {
1597 if (stage == CG_STAGE_PRE_SSA) {
1598 NVC0LoweringPass pass(prog);
1599 return pass.run(prog, false, true);
1600 } else
1601 if (stage == CG_STAGE_POST_RA) {
1602 NVC0LegalizePostRA pass(prog);
1603 return pass.run(prog, false, true);
1604 } else
1605 if (stage == CG_STAGE_SSA) {
1606 NVC0LegalizeSSA pass;
1607 return pass.run(prog, false, true);
1608 }
1609 return false;
1610 }
1611
1612 } // namespace nv50_ir