nvc0/ir: add support for SAMPLEMASK sysval
src/gallium/drivers/nouveau/codegen/nv50_ir_lowering_nvc0.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_build_util.h"

#include "codegen/nv50_ir_target_nvc0.h"

#include <limits>

namespace nv50_ir {

#define QOP_ADD  0
#define QOP_SUBR 1
#define QOP_SUB  2
#define QOP_MOV2 3

//             UL UR LL LR
#define QUADOP(q, r, s, t)            \
   ((QOP_##q << 6) | (QOP_##r << 4) | \
    (QOP_##s << 2) | (QOP_##t << 0))
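// Each 2-bit field selects the op one lane of the quad executes; e.g.
// QUADOP(SUBR, MOV2, SUBR, MOV2) runs SUBR on the left lanes and MOV2 on the
// right ones.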

class NVC0LegalizeSSA : public Pass
{
private:
   virtual bool visit(BasicBlock *);
   virtual bool visit(Function *);

   // we want to insert calls to the builtin library only after optimization
   void handleDIV(Instruction *);    // integer division, modulus
   void handleRCPRSQ(Instruction *); // double precision float recip/rsqrt

private:
   BuildUtil bld;
};

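// Lower integer DIV/MOD to a call into the builtin library. The operands are
// moved to $r0/$r1; given which register the result is read from below, the
// quotient apparently comes back in $r0 and the remainder in $r1.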
void
NVC0LegalizeSSA::handleDIV(Instruction *i)
{
   FlowInstruction *call;
   int builtin;
   Value *def[2];

   bld.setPosition(i, false);
   def[0] = bld.mkMovToReg(0, i->getSrc(0))->getDef(0);
   def[1] = bld.mkMovToReg(1, i->getSrc(1))->getDef(0);
   switch (i->dType) {
   case TYPE_U32: builtin = NVC0_BUILTIN_DIV_U32; break;
   case TYPE_S32: builtin = NVC0_BUILTIN_DIV_S32; break;
   default:
      return;
   }
   call = bld.mkFlow(OP_CALL, NULL, CC_ALWAYS, NULL);
   bld.mkMov(i->getDef(0), def[(i->op == OP_DIV) ? 0 : 1]);
   bld.mkClobber(FILE_GPR, (i->op == OP_DIV) ? 0xe : 0xd, 2);
   bld.mkClobber(FILE_PREDICATE, (i->dType == TYPE_S32) ? 0xf : 0x3, 0);

   call->fixed = 1;
   call->absolute = call->builtin = 1;
   call->target.builtin = builtin;
   delete_Instruction(prog, i);
}

void
NVC0LegalizeSSA::handleRCPRSQ(Instruction *i)
{
   // TODO
}

bool
NVC0LegalizeSSA::visit(Function *fn)
{
   bld.setProgram(fn->getProgram());
   return true;
}

bool
NVC0LegalizeSSA::visit(BasicBlock *bb)
{
   Instruction *next;
   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->dType == TYPE_F32)
         continue;
      switch (i->op) {
      case OP_DIV:
      case OP_MOD:
         handleDIV(i);
         break;
      case OP_RCP:
      case OP_RSQ:
         if (i->dType == TYPE_F64)
            handleRCPRSQ(i);
         break;
      default:
         break;
      }
   }
   return true;
}

class NVC0LegalizePostRA : public Pass
{
public:
   NVC0LegalizePostRA(const Program *);

private:
   virtual bool visit(Function *);
   virtual bool visit(BasicBlock *);

   void replaceZero(Instruction *);
   bool tryReplaceContWithBra(BasicBlock *);
   void propagateJoin(BasicBlock *);

   struct TexUse
   {
      TexUse(Instruction *use, const Instruction *tex)
         : insn(use), tex(tex), level(-1) { }
      Instruction *insn;
      const Instruction *tex; // or split / mov
      int level;
   };
   struct Limits
   {
      Limits() { }
      Limits(int min, int max) : min(min), max(max) { }
      int min, max;
   };
   bool insertTextureBarriers(Function *);
   inline bool insnDominatedBy(const Instruction *, const Instruction *) const;
   void findFirstUses(const Instruction *tex, const Instruction *def,
                      std::list<TexUse>&);
   void findOverwritingDefs(const Instruction *tex, Instruction *insn,
                            const BasicBlock *term,
                            std::list<TexUse>&);
   void addTexUse(std::list<TexUse>&, Instruction *, const Instruction *);
   const Instruction *recurseDef(const Instruction *);

private:
   LValue *rZero;
   LValue *carry;
   const bool needTexBar;
};

NVC0LegalizePostRA::NVC0LegalizePostRA(const Program *prog)
   : rZero(NULL),
     carry(NULL),
     needTexBar(prog->getTarget()->getChipset() >= 0xe0)
{
}

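// "early" dominates "later" if it is guaranteed to execute first: either it
// precedes it within the same BB, or its BB dominates the later one's.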
bool
NVC0LegalizePostRA::insnDominatedBy(const Instruction *later,
                                    const Instruction *early) const
{
   if (early->bb == later->bb)
      return early->serial < later->serial;
   return later->bb->dominatedBy(early->bb);
}

void
NVC0LegalizePostRA::addTexUse(std::list<TexUse> &uses,
                              Instruction *usei, const Instruction *insn)
{
   bool add = true;
   for (std::list<TexUse>::iterator it = uses.begin();
        it != uses.end();) {
      if (insnDominatedBy(usei, it->insn)) {
         add = false;
         break;
      }
      if (insnDominatedBy(it->insn, usei))
         it = uses.erase(it);
      else
         ++it;
   }
   if (add)
      uses.push_back(TexUse(usei, insn));
}

void
NVC0LegalizePostRA::findOverwritingDefs(const Instruction *texi,
                                        Instruction *insn,
                                        const BasicBlock *term,
                                        std::list<TexUse> &uses)
{
   while (insn->op == OP_MOV && insn->getDef(0)->equals(insn->getSrc(0)))
      insn = insn->getSrc(0)->getUniqueInsn();

   if (!insn || !insn->bb->reachableBy(texi->bb, term))
      return;

   switch (insn->op) {
   /* Values not connected to the tex's definition through any of these should
    * not be conflicting.
    */
   case OP_SPLIT:
   case OP_MERGE:
   case OP_PHI:
   case OP_UNION:
      /* recurse again */
      for (int s = 0; insn->srcExists(s); ++s)
         findOverwritingDefs(texi, insn->getSrc(s)->getUniqueInsn(), term,
                             uses);
      break;
   default:
      // if (!isTextureOp(insn->op)) // TODO: are TEXes always ordered?
      addTexUse(uses, insn, texi);
      break;
   }
}

void
NVC0LegalizePostRA::findFirstUses(const Instruction *texi,
                                  const Instruction *insn,
                                  std::list<TexUse> &uses)
{
   for (int d = 0; insn->defExists(d); ++d) {
      Value *v = insn->getDef(d);
      for (Value::UseIterator u = v->uses.begin(); u != v->uses.end(); ++u) {
         Instruction *usei = (*u)->getInsn();

         if (usei->op == OP_PHI || usei->op == OP_UNION) {
            // need a barrier before WAW cases
            for (int s = 0; usei->srcExists(s); ++s) {
               Instruction *defi = usei->getSrc(s)->getUniqueInsn();
               if (defi && &usei->src(s) != *u)
                  findOverwritingDefs(texi, defi, usei->bb, uses);
            }
         }

         if (usei->op == OP_SPLIT ||
             usei->op == OP_MERGE ||
             usei->op == OP_PHI ||
             usei->op == OP_UNION) {
            // these uses don't manifest in the machine code
            findFirstUses(texi, usei, uses);
         } else
         if (usei->op == OP_MOV && usei->getDef(0)->equals(usei->getSrc(0)) &&
             usei->subOp != NV50_IR_SUBOP_MOV_FINAL) {
            findFirstUses(texi, usei, uses);
         } else {
            addTexUse(uses, usei, insn);
         }
      }
   }
}

// Texture barriers:
// This pass is a bit long and ugly and can probably be optimized.
//
// 1. obtain a list of TEXes and their outputs' first use(s)
// 2. calculate the barrier level of each first use (minimal number of TEXes,
//    over all paths, between the TEX and the use in question)
// 3. for each barrier, if all paths from the source TEX to that barrier
//    contain a barrier of lesser level, it can be culled
bool
NVC0LegalizePostRA::insertTextureBarriers(Function *fn)
{
   std::list<TexUse> *uses;
   std::vector<Instruction *> texes;
   std::vector<int> bbFirstTex;
   std::vector<int> bbFirstUse;
   std::vector<int> texCounts;
   std::vector<TexUse> useVec;
   ArrayList insns;

   fn->orderInstructions(insns);

   texCounts.resize(fn->allBBlocks.getSize(), 0);
   bbFirstTex.resize(fn->allBBlocks.getSize(), insns.getSize());
   bbFirstUse.resize(fn->allBBlocks.getSize(), insns.getSize());

   // tag BB CFG nodes by their id for later
   for (ArrayList::Iterator i = fn->allBBlocks.iterator(); !i.end(); i.next()) {
      BasicBlock *bb = reinterpret_cast<BasicBlock *>(i.get());
      if (bb)
         bb->cfg.tag = bb->getId();
   }

   // gather the first uses for each TEX
   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *tex = reinterpret_cast<Instruction *>(insns.get(i));
      if (isTextureOp(tex->op)) {
         texes.push_back(tex);
         if (!texCounts.at(tex->bb->getId()))
            bbFirstTex[tex->bb->getId()] = texes.size() - 1;
         texCounts[tex->bb->getId()]++;
      }
   }
   insns.clear();
   if (texes.empty())
      return false;
   uses = new std::list<TexUse>[texes.size()];
   if (!uses)
      return false;
   for (size_t i = 0; i < texes.size(); ++i)
      findFirstUses(texes[i], texes[i], uses[i]);

   // determine the barrier level at each use
   for (size_t i = 0; i < texes.size(); ++i) {
      for (std::list<TexUse>::iterator u = uses[i].begin(); u != uses[i].end();
           ++u) {
         BasicBlock *tb = texes[i]->bb;
         BasicBlock *ub = u->insn->bb;
         if (tb == ub) {
            u->level = 0;
            for (size_t j = i + 1; j < texes.size() &&
                    texes[j]->bb == tb && texes[j]->serial < u->insn->serial;
                 ++j)
               u->level++;
         } else {
            u->level = fn->cfg.findLightestPathWeight(&tb->cfg,
                                                      &ub->cfg, texCounts);
            if (u->level < 0) {
               WARN("Failed to find path TEX -> TEXBAR\n");
               u->level = 0;
               continue;
            }
            // this counted all TEXes in the origin block, correct that
            u->level -= i - bbFirstTex.at(tb->getId()) + 1 /* this TEX */;
            // and did not count the TEXes in the destination block, add those
            for (size_t j = bbFirstTex.at(ub->getId()); j < texes.size() &&
                    texes[j]->bb == ub && texes[j]->serial < u->insn->serial;
                 ++j)
               u->level++;
         }
         assert(u->level >= 0);
         useVec.push_back(*u);
      }
   }
   delete[] uses;
   uses = NULL;

   // insert the barriers
   for (size_t i = 0; i < useVec.size(); ++i) {
      Instruction *prev = useVec[i].insn->prev;
      if (useVec[i].level < 0)
         continue;
      if (prev && prev->op == OP_TEXBAR) {
         if (prev->subOp > useVec[i].level)
            prev->subOp = useVec[i].level;
         prev->setSrc(prev->srcCount(), useVec[i].tex->getDef(0));
      } else {
         Instruction *bar = new_Instruction(func, OP_TEXBAR, TYPE_NONE);
         bar->fixed = 1;
         bar->subOp = useVec[i].level;
         // make use explicit to ease latency calculation
         bar->setSrc(bar->srcCount(), useVec[i].tex->getDef(0));
         useVec[i].insn->bb->insertBefore(useVec[i].insn, bar);
      }
   }

   if (fn->getProgram()->optLevel < 3) {
      if (uses)
         delete[] uses;
      return true;
   }

   std::vector<Limits> limitT, limitB, limitS; // entry, exit, single

   limitT.resize(fn->allBBlocks.getSize(), Limits(0, 0));
   limitB.resize(fn->allBBlocks.getSize(), Limits(0, 0));
   limitS.resize(fn->allBBlocks.getSize());

   // cull unneeded barriers (should do that earlier, but for simplicity)
   IteratorRef bi = fn->cfg.iteratorCFG();
   // first calculate min/max outstanding TEXes for each BB
   for (bi->reset(); !bi->end(); bi->next()) {
      Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
      BasicBlock *bb = BasicBlock::get(n);
      int min = 0;
      int max = std::numeric_limits<int>::max();
      for (Instruction *i = bb->getFirst(); i; i = i->next) {
         if (isTextureOp(i->op)) {
            min++;
            if (max < std::numeric_limits<int>::max())
               max++;
         } else
         if (i->op == OP_TEXBAR) {
            min = MIN2(min, i->subOp);
            max = MIN2(max, i->subOp);
         }
      }
      // limits when looking at an isolated block
      limitS[bb->getId()].min = min;
      limitS[bb->getId()].max = max;
   }
   // propagate the min/max values
   for (unsigned int l = 0; l <= fn->loopNestingBound; ++l) {
      for (bi->reset(); !bi->end(); bi->next()) {
         Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
         BasicBlock *bb = BasicBlock::get(n);
         const int bbId = bb->getId();
         for (Graph::EdgeIterator ei = n->incident(); !ei.end(); ei.next()) {
            BasicBlock *in = BasicBlock::get(ei.getNode());
            const int inId = in->getId();
            limitT[bbId].min = MAX2(limitT[bbId].min, limitB[inId].min);
            limitT[bbId].max = MAX2(limitT[bbId].max, limitB[inId].max);
         }
         // I just hope this is correct ...
         if (limitS[bbId].max == std::numeric_limits<int>::max()) {
            // no barrier
            limitB[bbId].min = limitT[bbId].min + limitS[bbId].min;
            limitB[bbId].max = limitT[bbId].max + limitS[bbId].min;
         } else {
            // block contained a barrier
            limitB[bbId].min = MIN2(limitS[bbId].max,
                                    limitT[bbId].min + limitS[bbId].min);
            limitB[bbId].max = MIN2(limitS[bbId].max,
                                    limitT[bbId].max + limitS[bbId].min);
         }
      }
   }
   // finally delete unnecessary barriers
   for (bi->reset(); !bi->end(); bi->next()) {
      Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
      BasicBlock *bb = BasicBlock::get(n);
      Instruction *prev = NULL;
      Instruction *next;
      int max = limitT[bb->getId()].max;
      for (Instruction *i = bb->getFirst(); i; i = next) {
         next = i->next;
         if (i->op == OP_TEXBAR) {
            if (i->subOp >= max) {
               delete_Instruction(prog, i);
               i = NULL;
            } else {
               max = i->subOp;
               if (prev && prev->op == OP_TEXBAR && prev->subOp >= max) {
                  delete_Instruction(prog, prev);
                  prev = NULL;
               }
            }
         } else
         if (isTextureOp(i->op)) {
            max++;
         }
         if (i && !i->isNop())
            prev = i;
      }
   }
   if (uses)
      delete[] uses;
   return true;
}

bool
NVC0LegalizePostRA::visit(Function *fn)
{
   if (needTexBar)
      insertTextureBarriers(fn);

   rZero = new_LValue(fn, FILE_GPR);
   carry = new_LValue(fn, FILE_FLAGS);

   rZero->reg.data.id = prog->getTarget()->getFileSize(FILE_GPR);
   carry->reg.data.id = 0;

   return true;
}

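// Replace zero immediates by the dedicated zero register set up in
// visit(Function) (SUCLAMP keeps its immediate third source).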
void
NVC0LegalizePostRA::replaceZero(Instruction *i)
{
   for (int s = 0; i->srcExists(s); ++s) {
      if (s == 2 && i->op == OP_SUCLAMP)
         continue;
      ImmediateValue *imm = i->getSrc(s)->asImm();
      if (imm && imm->reg.data.u64 == 0)
         i->setSrc(s, rZero);
   }
}

// replace CONT with BRA for single unconditional continue
bool
NVC0LegalizePostRA::tryReplaceContWithBra(BasicBlock *bb)
{
   if (bb->cfg.incidentCount() != 2 || bb->getEntry()->op != OP_PRECONT)
      return false;
   Graph::EdgeIterator ei = bb->cfg.incident();
   if (ei.getType() != Graph::Edge::BACK)
      ei.next();
   if (ei.getType() != Graph::Edge::BACK)
      return false;
   BasicBlock *contBB = BasicBlock::get(ei.getNode());

   if (!contBB->getExit() || contBB->getExit()->op != OP_CONT ||
       contBB->getExit()->getPredicate())
      return false;
   contBB->getExit()->op = OP_BRA;
   bb->remove(bb->getEntry()); // delete PRECONT

   ei.next();
   assert(ei.end() || ei.getType() != Graph::Edge::BACK);
   return true;
}

// replace branches to join blocks with join ops
void
NVC0LegalizePostRA::propagateJoin(BasicBlock *bb)
{
   if (bb->getEntry()->op != OP_JOIN || bb->getEntry()->asFlow()->limit)
      return;
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      BasicBlock *in = BasicBlock::get(ei.getNode());
      Instruction *exit = in->getExit();
      if (!exit) {
         in->insertTail(new FlowInstruction(func, OP_JOIN, bb));
         // there should always be a terminator instruction
         WARN("inserted missing terminator in BB:%i\n", in->getId());
      } else
      if (exit->op == OP_BRA) {
         exit->op = OP_JOIN;
         exit->asFlow()->limit = 1; // must-not-propagate marker
      }
   }
   bb->remove(bb->getEntry());
}

bool
NVC0LegalizePostRA::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   // remove pseudo operations and non-fixed no-ops, split 64-bit operations
   for (i = bb->getFirst(); i; i = next) {
      next = i->next;
      if (i->op == OP_EMIT || i->op == OP_RESTART) {
         if (!i->getDef(0)->refCount())
            i->setDef(0, NULL);
         if (i->src(0).getFile() == FILE_IMMEDIATE)
            i->setSrc(0, rZero); // initial value must be 0
      } else
      if (i->isNop()) {
         bb->remove(i);
      } else {
         // TODO: Move this to before register allocation for operations that
         // need the $c register!
         if (typeSizeof(i->dType) == 8) {
            Instruction *hi;
            hi = BuildUtil::split64BitOpPostRA(func, i, rZero, carry);
            if (hi)
               next = hi;
         }

         if (i->op != OP_MOV && i->op != OP_PFETCH)
            replaceZero(i);
      }
   }
   if (!bb->getEntry())
      return true;

   if (!tryReplaceContWithBra(bb))
      propagateJoin(bb);

   return true;
}

class NVC0LoweringPass : public Pass
{
public:
   NVC0LoweringPass(Program *);

private:
   virtual bool visit(Function *);
   virtual bool visit(BasicBlock *);
   virtual bool visit(Instruction *);

   bool handleRDSV(Instruction *);
   bool handleWRSV(Instruction *);
   bool handleEXPORT(Instruction *);
   bool handleOUT(Instruction *);
   bool handleDIV(Instruction *);
   bool handleMOD(Instruction *);
   bool handleSQRT(Instruction *);
   bool handlePOW(Instruction *);
   bool handleTEX(TexInstruction *);
   bool handleTXD(TexInstruction *);
   bool handleTXQ(TexInstruction *);
   bool handleManualTXD(TexInstruction *);
   bool handleTXLQ(TexInstruction *);
   bool handleATOM(Instruction *);
   bool handleCasExch(Instruction *, bool needCctl);
   void handleSurfaceOpNVE4(TexInstruction *);

   void checkPredicate(Instruction *);

   void readTessCoord(LValue *dst, int c);

   Value *loadResInfo32(Value *ptr, uint32_t off);
   Value *loadMsInfo32(Value *ptr, uint32_t off);
   Value *loadTexHandle(Value *ptr, unsigned int slot);

   void adjustCoordinatesMS(TexInstruction *);
   void processSurfaceCoordsNVE4(TexInstruction *);

private:
   const Target *const targ;

   BuildUtil bld;

   Symbol *gMemBase;
   LValue *gpEmitAddress;
};

NVC0LoweringPass::NVC0LoweringPass(Program *prog) : targ(prog->getTarget())
{
   bld.setProgram(prog);
   gMemBase = NULL;
}

bool
NVC0LoweringPass::visit(Function *fn)
{
   if (prog->getType() == Program::TYPE_GEOMETRY) {
      assert(!strncmp(fn->getName(), "MAIN", 4));
      // TODO: when we generate actual functions pass this value along somehow
      bld.setPosition(BasicBlock::get(fn->cfg.getRoot()), false);
      gpEmitAddress = bld.loadImm(NULL, 0)->asLValue();
      if (fn->cfgExit) {
         bld.setPosition(BasicBlock::get(fn->cfgExit)->getExit(), false);
         bld.mkMovToReg(0, gpEmitAddress);
      }
   }
   return true;
}

bool
NVC0LoweringPass::visit(BasicBlock *bb)
{
   return true;
}

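// Read a texture handle the driver stored in the constant buffer, at
// texBindBase + slot * 4; used for indirect texturing below.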
inline Value *
NVC0LoweringPass::loadTexHandle(Value *ptr, unsigned int slot)
{
   uint8_t b = prog->driver->io.resInfoCBSlot;
   uint32_t off = prog->driver->io.texBindBase + slot * 4;
   return bld.
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
}

// move array source to first slot, convert to u16, add indirections
bool
NVC0LoweringPass::handleTEX(TexInstruction *i)
{
   const int dim = i->tex.target.getDim() + i->tex.target.isCube();
   const int arg = i->tex.target.getArgCount();
   const int lyr = arg - (i->tex.target.isMS() ? 2 : 1);
   const int chipset = prog->getTarget()->getChipset();

   if (chipset >= NVISA_GK104_CHIPSET) {
      if (i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
         WARN("indirect TEX not implemented\n");
      }
      if (i->tex.r == i->tex.s) {
         i->tex.r += prog->driver->io.texBindBase / 4;
         i->tex.s = 0; // only a single cX[] value possible here
      } else {
         Value *hnd = bld.getScratch();
         Value *rHnd = loadTexHandle(NULL, i->tex.r);
         Value *sHnd = loadTexHandle(NULL, i->tex.s);

         bld.mkOp3(OP_INSBF, TYPE_U32, hnd, rHnd, bld.mkImm(0x1400), sHnd);

         i->tex.r = 0; // not used for indirect tex
         i->tex.s = 0;
         i->setIndirectR(hnd);
      }
      if (i->tex.target.isArray()) {
         LValue *layer = new_LValue(func, FILE_GPR);
         Value *src = i->getSrc(lyr);
         const int sat = (i->op == OP_TXF) ? 1 : 0;
         DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
         bld.mkCvt(OP_CVT, TYPE_U16, layer, sTy, src)->saturate = sat;
         for (int s = dim; s >= 1; --s)
            i->setSrc(s, i->getSrc(s - 1));
         i->setSrc(0, layer);
      }
   } else
   // (nvc0) generate and move the tsc/tic/array source to the front
   if (i->tex.target.isArray() || i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
      LValue *src = new_LValue(func, FILE_GPR); // 0xttxsaaaa

      Value *arrayIndex = i->tex.target.isArray() ? i->getSrc(lyr) : NULL;
      for (int s = dim; s >= 1; --s)
         i->setSrc(s, i->getSrc(s - 1));
      i->setSrc(0, arrayIndex);

      Value *ticRel = i->getIndirectR();
      Value *tscRel = i->getIndirectS();

      if (arrayIndex) {
         int sat = (i->op == OP_TXF) ? 1 : 0;
         DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
         bld.mkCvt(OP_CVT, TYPE_U16, src, sTy, arrayIndex)->saturate = sat;
      } else {
         bld.loadImm(src, 0);
      }

      if (ticRel) {
         i->setSrc(i->tex.rIndirectSrc, NULL);
         bld.mkOp3(OP_INSBF, TYPE_U32, src, ticRel, bld.mkImm(0x0917), src);
      }
      if (tscRel) {
         i->setSrc(i->tex.sIndirectSrc, NULL);
         bld.mkOp3(OP_INSBF, TYPE_U32, src, tscRel, bld.mkImm(0x0710), src);
      }

      i->setSrc(0, src);
   }

   // For nvc0, the sample id has to go in the second operand, like the
   // offset does. Right now we don't know how to pass both in, and this
   // case can't happen with OpenGL. On nve0, the sample id is part of the
   // texture coordinate argument.
   assert(chipset >= NVISA_GK104_CHIPSET ||
          !i->tex.useOffsets || !i->tex.target.isMS());

   // offset is last source (lod 1st, dc 2nd)
   if (i->tex.useOffsets) {
      uint32_t value = 0;
      int n, c;
      int s = i->srcCount(0xff, true);
      if (i->srcExists(s)) // move potential predicate out of the way
         i->moveSources(s, 1);
      if (i->op == OP_TXG) {
         assert(i->tex.useOffsets == 1);
         for (c = 0; c < 3; ++c)
            value |= (i->tex.offset[0][c] & 0xff) << (c * 8);
      } else {
         for (n = 0; n < i->tex.useOffsets; ++n)
            for (c = 0; c < 3; ++c)
               value |= (i->tex.offset[n][c] & 0xf) << (n * 12 + c * 4);
      }
      i->setSrc(s, bld.loadImm(NULL, value));
   }

   if (chipset >= NVISA_GK104_CHIPSET) {
      //
      // If TEX requires more than 4 sources, the 2nd register tuple must be
      // aligned to 4, even if it consists of just a single 4-byte register.
      //
      // XXX HACK: We insert 0 sources to avoid the 5 or 6 regs case.
      //
      int s = i->srcCount(0xff, true);
      if (s > 4 && s < 7) {
         if (i->srcExists(s)) // move potential predicate out of the way
            i->moveSources(s, 7 - s);
         while (s < 7)
            i->setSrc(s++, bld.loadImm(NULL, 0));
      }
   }

   return true;
}

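// Emulate TXD inside a QUADON/QUADPOP region: for each lane l, broadcast
// lane l's coordinates across the quad, add dPdx/dPdy into the neighboring
// lanes with quad ops, run one TEX per lane, and OP_UNION the per-lane
// results.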
bool
NVC0LoweringPass::handleManualTXD(TexInstruction *i)
{
   static const uint8_t qOps[4][2] =
   {
      { QUADOP(MOV2, ADD,  MOV2, ADD),  QUADOP(MOV2, MOV2, ADD,  ADD) }, // l0
      { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(MOV2, MOV2, ADD,  ADD) }, // l1
      { QUADOP(MOV2, ADD,  MOV2, ADD),  QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l2
      { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l3
   };
   Value *def[4][4];
   Value *crd[3];
   Instruction *tex;
   Value *zero = bld.loadImm(bld.getSSA(), 0);
   int l, c;
   const int dim = i->tex.target.getDim();

   i->op = OP_TEX; // no need to clone dPdx/dPdy later

   for (c = 0; c < dim; ++c)
      crd[c] = bld.getScratch();

   bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
   for (l = 0; l < 4; ++l) {
      // mov coordinates from lane l to all lanes
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(0x00, crd[c], l, i->getSrc(c), zero);
      // add dPdx from lane l to lanes dx
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][0], crd[c], l, i->dPdx[c].get(), crd[c]);
      // add dPdy from lane l to lanes dy
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][1], crd[c], l, i->dPdy[c].get(), crd[c]);
      // texture
      bld.insert(tex = cloneForward(func, i));
      for (c = 0; c < dim; ++c)
         tex->setSrc(c, crd[c]);
      // save results
      for (c = 0; i->defExists(c); ++c) {
         Instruction *mov;
         def[c][l] = bld.getSSA();
         mov = bld.mkMov(def[c][l], tex->getDef(c));
         mov->fixed = 1;
         mov->lanes = 1 << l;
      }
   }
   bld.mkOp(OP_QUADPOP, TYPE_NONE, NULL);

   for (c = 0; i->defExists(c); ++c) {
      Instruction *u = bld.mkOp(OP_UNION, TYPE_U32, i->getDef(c));
      for (l = 0; l < 4; ++l)
         u->setSrc(l, def[c][l]);
   }

   i->bb->remove(i);
   return true;
}

bool
NVC0LoweringPass::handleTXD(TexInstruction *txd)
{
   int dim = txd->tex.target.getDim();
   int arg = txd->tex.target.getArgCount();

   handleTEX(txd);
   while (txd->srcExists(arg))
      ++arg;

   txd->tex.derivAll = true;
   if (dim > 2 ||
       txd->tex.target.isCube() ||
       arg > 4 ||
       txd->tex.target.isShadow())
      return handleManualTXD(txd);

   for (int c = 0; c < dim; ++c) {
      txd->setSrc(arg + c * 2 + 0, txd->dPdx[c]);
      txd->setSrc(arg + c * 2 + 1, txd->dPdy[c]);
      txd->dPdx[c].set(NULL);
      txd->dPdy[c].set(NULL);
   }
   return true;
}

bool
NVC0LoweringPass::handleTXQ(TexInstruction *txq)
{
   // TODO: indirect resource/sampler index
   return true;
}

bool
NVC0LoweringPass::handleTXLQ(TexInstruction *i)
{
   /* The outputs are inverted compared to what the TGSI instruction
    * expects. Take that into account in the mask.
    */
   assert((i->tex.mask & ~3) == 0);
   if (i->tex.mask == 1)
      i->tex.mask = 2;
   else if (i->tex.mask == 2)
      i->tex.mask = 1;
   handleTEX(i);
   bld.setPosition(i, true);

   /* The returned values are not quite what we want:
    * (a) convert from s16/u16 to f32
    * (b) multiply by 1/256
    */
   for (int def = 0; def < 2; ++def) {
      if (!i->defExists(def))
         continue;
      enum DataType type = TYPE_S16;
      if (i->tex.mask == 2 || def > 0)
         type = TYPE_U16;
      bld.mkCvt(OP_CVT, TYPE_F32, i->getDef(def), type, i->getDef(def));
      bld.mkOp2(OP_MUL, TYPE_F32, i->getDef(def),
                i->getDef(def), bld.loadImm(NULL, 1.0f / 256));
   }
   if (i->tex.mask == 3) {
      LValue *t = new_LValue(func, FILE_GPR);
      bld.mkMov(t, i->getDef(0));
      bld.mkMov(i->getDef(0), i->getDef(1));
      bld.mkMov(i->getDef(1), t);
   }
   return true;
}

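// Lower atomics on shared/local memory to global atomics: the base of the
// respective memory window is read from SV_SBASE/SV_LBASE and added to the
// address.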
bool
NVC0LoweringPass::handleATOM(Instruction *atom)
{
   SVSemantic sv;

   switch (atom->src(0).getFile()) {
   case FILE_MEMORY_LOCAL:
      sv = SV_LBASE;
      break;
   case FILE_MEMORY_SHARED:
      sv = SV_SBASE;
      break;
   default:
      assert(atom->src(0).getFile() == FILE_MEMORY_GLOBAL);
      return true;
   }
   Value *base =
      bld.mkOp1v(OP_RDSV, TYPE_U32, bld.getScratch(), bld.mkSysVal(sv, 0));
   Value *ptr = atom->getIndirect(0, 0);

   atom->setSrc(0, cloneShallow(func, atom->getSrc(0)));
   atom->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
   if (ptr)
      base = bld.mkOp2v(OP_ADD, TYPE_U32, base, base, ptr);
   atom->setIndirect(0, 0, base);

   return true;
}

bool
NVC0LoweringPass::handleCasExch(Instruction *cas, bool needCctl)
{
   if (cas->subOp != NV50_IR_SUBOP_ATOM_CAS &&
       cas->subOp != NV50_IR_SUBOP_ATOM_EXCH)
      return false;
   bld.setPosition(cas, true);

   if (needCctl) {
      Instruction *cctl = bld.mkOp1(OP_CCTL, TYPE_NONE, NULL, cas->getSrc(0));
      cctl->setIndirect(0, 0, cas->getIndirect(0, 0));
      cctl->fixed = 1;
      cctl->subOp = NV50_IR_SUBOP_CCTL_IV;
      if (cas->isPredicated())
         cctl->setPredicate(cas->cc, cas->getPredicate());
   }

   if (cas->defExists(0) && cas->subOp == NV50_IR_SUBOP_ATOM_CAS) {
      // CAS is crazy. Its second source is a double reg, and the third source
      // should be set to the high part of the double reg or bad things will
      // happen elsewhere in the universe.
      // Also, it sometimes returns the new value instead of the old one
      // under mysterious circumstances.
      Value *dreg = bld.getSSA(8);
      bld.setPosition(cas, false);
      bld.mkOp2(OP_MERGE, TYPE_U64, dreg, cas->getSrc(1), cas->getSrc(2));
      cas->setSrc(1, dreg);
   }

   return true;
}

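// Load one 32-bit word of surface info from the driver's constant buffer,
// relative to suInfoBase.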
inline Value *
NVC0LoweringPass::loadResInfo32(Value *ptr, uint32_t off)
{
   uint8_t b = prog->driver->io.resInfoCBSlot;
   off += prog->driver->io.suInfoBase;
   return bld.
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
}

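// Load one 32-bit word of the driver's multisample info (per-sample pixel
// offsets), relative to msInfoBase.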
inline Value *
NVC0LoweringPass::loadMsInfo32(Value *ptr, uint32_t off)
{
   uint8_t b = prog->driver->io.msInfoCBSlot;
   off += prog->driver->io.msInfoBase;
   return bld.
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
}

/* On nvc0, surface info is obtained via the surface binding points passed
 * to the SULD/SUST instructions.
 * On nve4, surface info is stored in c[] and is used by various special
 * instructions, e.g. for clamping coordinates or generating an address.
 * They couldn't just have added an equivalent to TIC now, could they?
 */
#define NVE4_SU_INFO_ADDR   0x00
#define NVE4_SU_INFO_FMT    0x04
#define NVE4_SU_INFO_DIM_X  0x08
#define NVE4_SU_INFO_PITCH  0x0c
#define NVE4_SU_INFO_DIM_Y  0x10
#define NVE4_SU_INFO_ARRAY  0x14
#define NVE4_SU_INFO_DIM_Z  0x18
#define NVE4_SU_INFO_UNK1C  0x1c
#define NVE4_SU_INFO_WIDTH  0x20
#define NVE4_SU_INFO_HEIGHT 0x24
#define NVE4_SU_INFO_DEPTH  0x28
#define NVE4_SU_INFO_TARGET 0x2c
#define NVE4_SU_INFO_CALL   0x30
#define NVE4_SU_INFO_RAW_X  0x34
#define NVE4_SU_INFO_MS_X   0x38
#define NVE4_SU_INFO_MS_Y   0x3c

#define NVE4_SU_INFO__STRIDE 0x40

#define NVE4_SU_INFO_DIM(i)  (0x08 + (i) * 8)
#define NVE4_SU_INFO_SIZE(i) (0x20 + (i) * 4)
#define NVE4_SU_INFO_MS(i)   (0x38 + (i) * 4)

static inline uint16_t getSuClampSubOp(const TexInstruction *su, int c)
{
   switch (su->tex.target.getEnum()) {
   case TEX_TARGET_BUFFER:      return NV50_IR_SUBOP_SUCLAMP_PL(0, 1);
   case TEX_TARGET_RECT:        return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_1D:          return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_1D_ARRAY:    return (c == 1) ?
                                   NV50_IR_SUBOP_SUCLAMP_PL(0, 2) :
                                   NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_2D:          return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
   case TEX_TARGET_2D_MS:       return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
   case TEX_TARGET_2D_ARRAY:    return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_2D_MS_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_3D:          return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_CUBE:        return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_CUBE_ARRAY:  return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   default:
      assert(0);
      return 0;
   }
}

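// Convert multisample coordinates to pixel coordinates: the integer sample
// offset (dx, dy) is looked up in the MS info table by sample id and added
// to (x << ms_x, y << ms_y); the sample id source is dropped afterwards.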
void
NVC0LoweringPass::adjustCoordinatesMS(TexInstruction *tex)
{
   const uint16_t base = tex->tex.r * NVE4_SU_INFO__STRIDE;
   const int arg = tex->tex.target.getArgCount();

   if (tex->tex.target == TEX_TARGET_2D_MS)
      tex->tex.target = TEX_TARGET_2D;
   else
   if (tex->tex.target == TEX_TARGET_2D_MS_ARRAY)
      tex->tex.target = TEX_TARGET_2D_ARRAY;
   else
      return;

   Value *x = tex->getSrc(0);
   Value *y = tex->getSrc(1);
   Value *s = tex->getSrc(arg - 1);

   Value *tx = bld.getSSA(), *ty = bld.getSSA(), *ts = bld.getSSA();

   Value *ms_x = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(0));
   Value *ms_y = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(1));

   bld.mkOp2(OP_SHL, TYPE_U32, tx, x, ms_x);
   bld.mkOp2(OP_SHL, TYPE_U32, ty, y, ms_y);

   s = bld.mkOp2v(OP_AND, TYPE_U32, ts, s, bld.loadImm(NULL, 0x7));
   s = bld.mkOp2v(OP_SHL, TYPE_U32, ts, ts, bld.mkImm(3));

   Value *dx = loadMsInfo32(ts, 0x0);
   Value *dy = loadMsInfo32(ts, 0x4);

   bld.mkOp2(OP_ADD, TYPE_U32, tx, tx, dx);
   bld.mkOp2(OP_ADD, TYPE_U32, ty, ty, dy);

   tex->setSrc(0, tx);
   tex->setSrc(1, ty);
   tex->moveSources(arg, -1);
}

// Sets 64-bit "generic address", predicate and format sources for SULD/SUST.
// They're computed from the coordinates using the surface info in c[] space.
void
NVC0LoweringPass::processSurfaceCoordsNVE4(TexInstruction *su)
{
   Instruction *insn;
   const bool atom = su->op == OP_SUREDB || su->op == OP_SUREDP;
   const bool raw =
      su->op == OP_SULDB || su->op == OP_SUSTB || su->op == OP_SUREDB;
   const int idx = su->tex.r;
   const int dim = su->tex.target.getDim();
   const int arg = dim + (su->tex.target.isArray() ? 1 : 0);
   const uint16_t base = idx * NVE4_SU_INFO__STRIDE;
   int c;
   Value *zero = bld.mkImm(0);
   Value *p1 = NULL;
   Value *v;
   Value *src[3];
   Value *bf, *eau, *off;
   Value *addr, *pred;

   off = bld.getScratch(4);
   bf = bld.getScratch(4);
   addr = bld.getSSA(8);
   pred = bld.getScratch(1, FILE_PREDICATE);

   bld.setPosition(su, false);

   adjustCoordinatesMS(su);

   // calculate clamped coordinates
   for (c = 0; c < arg; ++c) {
      src[c] = bld.getScratch();
      if (c == 0 && raw)
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_RAW_X);
      else
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_DIM(c));
      bld.mkOp3(OP_SUCLAMP, TYPE_S32, src[c], su->getSrc(c), v, zero)
         ->subOp = getSuClampSubOp(su, c);
   }
   for (; c < 3; ++c)
      src[c] = zero;

   // set predicate output
   if (su->tex.target == TEX_TARGET_BUFFER) {
      src[0]->getInsn()->setFlagsDef(1, pred);
   } else
   if (su->tex.target.isArray()) {
      p1 = bld.getSSA(1, FILE_PREDICATE);
      src[dim]->getInsn()->setFlagsDef(1, p1);
   }

   // calculate pixel offset
   if (dim == 1) {
      if (su->tex.target != TEX_TARGET_BUFFER)
         bld.mkOp2(OP_AND, TYPE_U32, off, src[0], bld.loadImm(NULL, 0xffff));
   } else
   if (dim == 3) {
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, src[2], v, src[1])
         ->subOp = NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l

      v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, off, v, src[0])
         ->subOp = NV50_IR_SUBOP_MADSP(0,2,8); // u32 u16l u16l
   } else {
      assert(dim == 2);
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, src[1], v, src[0])
         ->subOp = su->tex.target.isArray() ?
         NV50_IR_SUBOP_MADSP_SD : NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l
   }

   // calculate effective address part 1
   if (su->tex.target == TEX_TARGET_BUFFER) {
      if (raw) {
         bf = src[0];
      } else {
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);
         bld.mkOp3(OP_VSHL, TYPE_U32, bf, src[0], v, zero)
            ->subOp = NV50_IR_SUBOP_V1(7,6,8|2);
      }
   } else {
      Value *y = src[1];
      Value *z = src[2];
      uint16_t subOp = 0;

      switch (dim) {
      case 1:
         y = zero;
         z = zero;
         break;
      case 2:
         z = off;
         if (!su->tex.target.isArray()) {
            z = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
            subOp = NV50_IR_SUBOP_SUBFM_3D;
         }
         break;
      default:
         subOp = NV50_IR_SUBOP_SUBFM_3D;
         assert(dim == 3);
         break;
      }
      insn = bld.mkOp3(OP_SUBFM, TYPE_U32, bf, src[0], y, z);
      insn->subOp = subOp;
      insn->setFlagsDef(1, pred);
   }

   // part 2
   v = loadResInfo32(NULL, base + NVE4_SU_INFO_ADDR);

   if (su->tex.target == TEX_TARGET_BUFFER) {
      eau = v;
   } else {
      eau = bld.mkOp3v(OP_SUEAU, TYPE_U32, bld.getScratch(4), off, bf, v);
   }
   // add array layer offset
   if (su->tex.target.isArray()) {
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_ARRAY);
      if (dim == 1)
         bld.mkOp3(OP_MADSP, TYPE_U32, eau, src[1], v, eau)
            ->subOp = NV50_IR_SUBOP_MADSP(4,0,0); // u16 u24 u32
      else
         bld.mkOp3(OP_MADSP, TYPE_U32, eau, v, src[2], eau)
            ->subOp = NV50_IR_SUBOP_MADSP(0,0,0); // u32 u24 u32
      // combine predicates
      assert(p1);
      bld.mkOp2(OP_OR, TYPE_U8, pred, pred, p1);
   }

   if (atom) {
      Value *lo = bf;
      if (su->tex.target == TEX_TARGET_BUFFER) {
         lo = zero;
         bld.mkMov(off, bf);
      }
      // bf == g[] address & 0xff
      // eau == g[] address >> 8
      bld.mkOp3(OP_PERMT, TYPE_U32, bf, lo, bld.loadImm(NULL, 0x6540), eau);
      bld.mkOp3(OP_PERMT, TYPE_U32, eau, zero, bld.loadImm(NULL, 0x0007), eau);
   } else
   if (su->op == OP_SULDP && su->tex.target == TEX_TARGET_BUFFER) {
      // Convert from u32 to u8 address format, which is what the library code
      // doing SULDP currently uses.
      // XXX: can SUEAU do this?
      // XXX: does it matter that we don't mask high bytes in bf?
      // Grrr.
      bld.mkOp2(OP_SHR, TYPE_U32, off, bf, bld.mkImm(8));
      bld.mkOp2(OP_ADD, TYPE_U32, eau, eau, off);
   }

   bld.mkOp2(OP_MERGE, TYPE_U64, addr, bf, eau);

   if (atom && su->tex.target == TEX_TARGET_BUFFER)
      bld.mkOp2(OP_ADD, TYPE_U64, addr, addr, off);

   // let's just set it 0 for raw access and hope it works
   v = raw ?
      bld.mkImm(0) : loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);

   // get rid of old coordinate sources, make space for fmt info and predicate
   su->moveSources(arg, 3 - arg);
   // set 64-bit address and 32-bit format sources
   su->setSrc(0, addr);
   su->setSrc(1, v);
   su->setSrc(2, pred);
}

void
NVC0LoweringPass::handleSurfaceOpNVE4(TexInstruction *su)
{
   processSurfaceCoordsNVE4(su);

   // Who do we hate more? The person who decided that nvc0's SULD doesn't
   // have to support conversion, or the person who decided that, in OpenCL,
   // you don't have to specify the format here like you do in OpenGL?

   if (su->op == OP_SULDP) {
      // We don't patch shaders. Ever.
      // You get an indirect call to our library blob here.
      // But at least it's uniform.
      FlowInstruction *call;
      LValue *p[3];
      LValue *r[5];
      uint16_t base = su->tex.r * NVE4_SU_INFO__STRIDE + NVE4_SU_INFO_CALL;

      for (int i = 0; i < 4; ++i)
         (r[i] = bld.getScratch(4, FILE_GPR))->reg.data.id = i;
      for (int i = 0; i < 3; ++i)
         (p[i] = bld.getScratch(1, FILE_PREDICATE))->reg.data.id = i;
      (r[4] = bld.getScratch(8, FILE_GPR))->reg.data.id = 4;

      bld.mkMov(p[1], bld.mkImm((su->cache == CACHE_CA) ? 1 : 0), TYPE_U8);
      bld.mkMov(p[2], bld.mkImm((su->cache == CACHE_CG) ? 1 : 0), TYPE_U8);
      bld.mkMov(p[0], su->getSrc(2), TYPE_U8);
      bld.mkMov(r[4], su->getSrc(0), TYPE_U64);
      bld.mkMov(r[2], su->getSrc(1), TYPE_U32);

      call = bld.mkFlow(OP_CALL, NULL, su->cc, su->getPredicate());

      call->indirect = 1;
      call->absolute = 1;
      call->setSrc(0, bld.mkSymbol(FILE_MEMORY_CONST,
                                   prog->driver->io.resInfoCBSlot, TYPE_U32,
                                   prog->driver->io.suInfoBase + base));
      call->setSrc(1, r[2]);
      call->setSrc(2, r[4]);
      for (int i = 0; i < 3; ++i)
         call->setSrc(3 + i, p[i]);
      for (int i = 0; i < 4; ++i) {
         call->setDef(i, r[i]);
         bld.mkMov(su->getDef(i), r[i]);
      }
      call->setDef(4, p[1]);
      delete_Instruction(bld.getProgram(), su);
   }

   if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
      // FIXME: for out-of-bounds accesses, the destination value will be
      // undefined!
      Value *pred = su->getSrc(2);
      CondCode cc = CC_NOT_P;
      if (su->getPredicate()) {
         pred = bld.getScratch(1, FILE_PREDICATE);
         cc = su->cc;
         if (cc == CC_NOT_P) {
            bld.mkOp2(OP_OR, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
         } else {
            bld.mkOp2(OP_AND, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
            pred->getInsn()->src(1).mod = Modifier(NV50_IR_MOD_NOT);
         }
      }
      Instruction *red = bld.mkOp(OP_ATOM, su->dType, su->getDef(0));
      red->subOp = su->subOp;
      if (!gMemBase)
         gMemBase = bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0);
      red->setSrc(0, gMemBase);
      red->setSrc(1, su->getSrc(3));
      if (su->subOp == NV50_IR_SUBOP_ATOM_CAS)
         red->setSrc(2, su->getSrc(4));
      red->setIndirect(0, 0, su->getSrc(0));
      red->setPredicate(cc, pred);
      delete_Instruction(bld.getProgram(), su);
      handleCasExch(red, true);
   } else {
      su->sType = (su->tex.target == TEX_TARGET_BUFFER) ? TYPE_U32 : TYPE_U8;
   }
}

bool
NVC0LoweringPass::handleWRSV(Instruction *i)
{
   Instruction *st;
   Symbol *sym;
   uint32_t addr;

   // must replace, $sreg are not writeable
   addr = targ->getSVAddress(FILE_SHADER_OUTPUT, i->getSrc(0)->asSym());
   if (addr >= 0x400)
      return false;
   sym = bld.mkSymbol(FILE_SHADER_OUTPUT, 0, i->sType, addr);

   st = bld.mkStore(OP_EXPORT, i->dType, sym, i->getIndirect(0, 0),
                    i->getSrc(1));
   st->perPatch = i->perPatch;

   bld.getBB()->remove(i);
   return true;
}

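// Read one tess coord component: u and v are fetched from shader outputs
// 0x2f0/0x2f4 indexed by the lane id; the third component is derived as
// 1 - u - v.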
void
NVC0LoweringPass::readTessCoord(LValue *dst, int c)
{
   Value *laneid = bld.getSSA();
   Value *x, *y;

   bld.mkOp1(OP_RDSV, TYPE_U32, laneid, bld.mkSysVal(SV_LANEID, 0));

   if (c == 0) {
      x = dst;
      y = NULL;
   } else
   if (c == 1) {
      x = NULL;
      y = dst;
   } else {
      assert(c == 2);
      x = bld.getSSA();
      y = bld.getSSA();
   }
   if (x)
      bld.mkFetch(x, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f0, NULL, laneid);
   if (y)
      bld.mkFetch(y, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f4, NULL, laneid);

   if (c == 2) {
      bld.mkOp2(OP_ADD, TYPE_F32, dst, x, y);
      bld.mkOp2(OP_SUB, TYPE_F32, dst, bld.loadImm(NULL, 1.0f), dst);
   }
}

bool
NVC0LoweringPass::handleRDSV(Instruction *i)
{
   Symbol *sym = i->getSrc(0)->asSym();
   const SVSemantic sv = sym->reg.data.sv.sv;
   Value *vtx = NULL;
   Instruction *ld;
   uint32_t addr = targ->getSVAddress(FILE_SHADER_INPUT, sym);

   if (addr >= 0x400) {
      // mov $sreg
      if (sym->reg.data.sv.index == 3) {
         // TGSI backend may use 4th component of TID, NTID, CTAID, NCTAID
         i->op = OP_MOV;
         i->setSrc(0, bld.mkImm((sv == SV_NTID || sv == SV_NCTAID) ? 1 : 0));
      }
      return true;
   }

   switch (sv) {
   case SV_POSITION:
      assert(prog->getType() == Program::TYPE_FRAGMENT);
      bld.mkInterp(NV50_IR_INTERP_LINEAR, i->getDef(0), addr, NULL);
      break;
   case SV_FACE:
   {
      Value *face = i->getDef(0);
      bld.mkInterp(NV50_IR_INTERP_FLAT, face, addr, NULL);
      if (i->dType == TYPE_F32) {
         bld.mkOp2(OP_AND, TYPE_U32, face, face, bld.mkImm(0x80000000));
         bld.mkOp2(OP_XOR, TYPE_U32, face, face, bld.mkImm(0xbf800000));
      }
   }
      break;
   case SV_TESS_COORD:
      assert(prog->getType() == Program::TYPE_TESSELLATION_EVAL);
      readTessCoord(i->getDef(0)->asLValue(), i->getSrc(0)->reg.data.sv.index);
      break;
   case SV_NTID:
   case SV_NCTAID:
   case SV_GRIDID:
      assert(targ->getChipset() >= NVISA_GK104_CHIPSET); // mov $sreg otherwise
      if (sym->reg.data.sv.index == 3) {
         i->op = OP_MOV;
         i->setSrc(0, bld.mkImm(sv == SV_GRIDID ? 0 : 1));
         return true;
      }
      addr += prog->driver->prop.cp.gridInfoBase;
      bld.mkLoad(TYPE_U32, i->getDef(0),
                 bld.mkSymbol(FILE_MEMORY_CONST, 0, TYPE_U32, addr), NULL);
      break;
   case SV_SAMPLE_INDEX:
      // TODO: Properly pass source as an address in the PIX address space
      // (which can be of the form [r0+offset]). But this is currently
      // unnecessary.
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
      break;
   case SV_SAMPLE_POS: {
      Value *off = new_LValue(func, FILE_GPR);
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
      bld.mkOp2(OP_SHL, TYPE_U32, off, i->getDef(0), bld.mkImm(3));
      bld.mkLoad(TYPE_F32,
                 i->getDef(0),
                 bld.mkSymbol(
                       FILE_MEMORY_CONST, prog->driver->io.resInfoCBSlot,
                       TYPE_U32, prog->driver->io.sampleInfoBase +
                       4 * sym->reg.data.sv.index),
                 off);
      break;
   }
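   // the SAMPLEMASK sysval is read via PIXLD with the coverage-mask subop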
   case SV_SAMPLE_MASK:
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_COVMASK;
      break;
   default:
      if (prog->getType() == Program::TYPE_TESSELLATION_EVAL)
         vtx = bld.mkOp1v(OP_PFETCH, TYPE_U32, bld.getSSA(), bld.mkImm(0));
      ld = bld.mkFetch(i->getDef(0), i->dType,
                       FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
      ld->perPatch = i->perPatch;
      break;
   }
   bld.getBB()->remove(i);
   return true;
}

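// lower floating point a / b to a * rcp(b)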
bool
NVC0LoweringPass::handleDIV(Instruction *i)
{
   if (!isFloatType(i->dType))
      return true;
   bld.setPosition(i, false);
   Instruction *rcp = bld.mkOp1(OP_RCP, i->dType, bld.getSSA(), i->getSrc(1));
   i->op = OP_MUL;
   i->setSrc(1, rcp->getDef(0));
   return true;
}

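// lower f32 mod(a, b) to a - b * trunc(a * rcp(b))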
bool
NVC0LoweringPass::handleMOD(Instruction *i)
{
   if (i->dType != TYPE_F32)
      return true;
   LValue *value = bld.getScratch();
   bld.mkOp1(OP_RCP, TYPE_F32, value, i->getSrc(1));
   bld.mkOp2(OP_MUL, TYPE_F32, value, i->getSrc(0), value);
   bld.mkOp1(OP_TRUNC, TYPE_F32, value, value);
   bld.mkOp2(OP_MUL, TYPE_F32, value, i->getSrc(1), value);
   i->op = OP_SUB;
   i->setSrc(1, value);
   return true;
}

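// lower sqrt(x) to x * rsq(x)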
bool
NVC0LoweringPass::handleSQRT(Instruction *i)
{
   Instruction *rsq = bld.mkOp1(OP_RSQ, TYPE_F32,
                                bld.getSSA(), i->getSrc(0));
   i->op = OP_MUL;
   i->setSrc(1, rsq->getDef(0));

   return true;
}

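// lower pow(x, y) to exp2(y * log2(x))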
bool
NVC0LoweringPass::handlePOW(Instruction *i)
{
   LValue *val = bld.getScratch();

   bld.mkOp1(OP_LG2, TYPE_F32, val, i->getSrc(0));
   bld.mkOp2(OP_MUL, TYPE_F32, val, i->getSrc(1), val)->dnz = 1;
   bld.mkOp1(OP_PREEX2, TYPE_F32, val, val);

   i->op = OP_EX2;
   i->setSrc(0, val);
   i->setSrc(1, NULL);

   return true;
}

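// In fragment programs, exports become final moves into the fixed output
// GPRs; in geometry programs they are made relative to the emit address.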
bool
NVC0LoweringPass::handleEXPORT(Instruction *i)
{
   if (prog->getType() == Program::TYPE_FRAGMENT) {
      int id = i->getSrc(0)->reg.data.offset / 4;

      if (i->src(0).isIndirect(0)) // TODO, ugly
         return false;
      i->op = OP_MOV;
      i->subOp = NV50_IR_SUBOP_MOV_FINAL;
      i->src(0).set(i->src(1));
      i->setSrc(1, NULL);
      i->setDef(0, new_LValue(func, FILE_GPR));
      i->getDef(0)->reg.data.id = id;

      prog->maxGPR = MAX2(prog->maxGPR, id);
   } else
   if (prog->getType() == Program::TYPE_GEOMETRY) {
      i->setIndirect(0, 1, gpEmitAddress);
   }
   return true;
}

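// A RESTART immediately following an EMIT is folded into the EMIT as the
// EMIT_RESTART subop; otherwise the op reads and updates the GS emit address.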
bool
NVC0LoweringPass::handleOUT(Instruction *i)
{
   if (i->op == OP_RESTART && i->prev && i->prev->op == OP_EMIT) {
      i->prev->subOp = NV50_IR_SUBOP_EMIT_RESTART;
      delete_Instruction(prog, i);
   } else {
      assert(gpEmitAddress);
      i->setDef(0, gpEmitAddress);
      if (i->srcExists(0))
         i->setSrc(1, i->getSrc(0));
      i->setSrc(0, gpEmitAddress);
   }
   return true;
}

// Generate a binary predicate if an instruction is predicated by
// e.g. an f32 value.
void
NVC0LoweringPass::checkPredicate(Instruction *insn)
{
   Value *pred = insn->getPredicate();
   Value *pdst;

   if (!pred || pred->reg.file == FILE_PREDICATE)
      return;
   pdst = new_LValue(func, FILE_PREDICATE);

   // CAUTION: don't use pdst->getInsn, the definition might not be unique,
   // delay turning PSET(FSET(x,y),0) into PSET(x,y) to a later pass

   bld.mkCmp(OP_SET, CC_NEU, insn->dType, pdst, insn->dType, bld.mkImm(0), pred);

   insn->setPredicate(insn->cc, pdst);
}

//
// - add quadop dance for texturing
// - put FP outputs in GPRs
// - convert instruction sequences
//
bool
NVC0LoweringPass::visit(Instruction *i)
{
   bld.setPosition(i, false);

   if (i->cc != CC_ALWAYS)
      checkPredicate(i);

   switch (i->op) {
   case OP_TEX:
   case OP_TXB:
   case OP_TXL:
   case OP_TXF:
   case OP_TXG:
      return handleTEX(i->asTex());
   case OP_TXD:
      return handleTXD(i->asTex());
   case OP_TXLQ:
      return handleTXLQ(i->asTex());
   case OP_TXQ:
      return handleTXQ(i->asTex());
   case OP_EX2:
      bld.mkOp1(OP_PREEX2, TYPE_F32, i->getDef(0), i->getSrc(0));
      i->setSrc(0, i->getDef(0));
      break;
   case OP_POW:
      return handlePOW(i);
   case OP_DIV:
      return handleDIV(i);
   case OP_MOD:
      return handleMOD(i);
   case OP_SQRT:
      return handleSQRT(i);
   case OP_EXPORT:
      return handleEXPORT(i);
   case OP_EMIT:
   case OP_RESTART:
      return handleOUT(i);
   case OP_RDSV:
      return handleRDSV(i);
   case OP_WRSV:
      return handleWRSV(i);
   case OP_LOAD:
      if (i->src(0).getFile() == FILE_SHADER_INPUT) {
         if (prog->getType() == Program::TYPE_COMPUTE) {
            i->getSrc(0)->reg.file = FILE_MEMORY_CONST;
            i->getSrc(0)->reg.fileIndex = 0;
         } else
         if (prog->getType() == Program::TYPE_GEOMETRY &&
             i->src(0).isIndirect(0)) {
            // XXX: this assumes vec4 units
            Value *ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
                                    i->getIndirect(0, 0), bld.mkImm(4));
            i->setIndirect(0, 0, ptr);
         } else {
            i->op = OP_VFETCH;
            assert(prog->getType() != Program::TYPE_FRAGMENT); // INTERP
         }
      }
      break;
   case OP_ATOM:
   {
      const bool cctl = i->src(0).getFile() == FILE_MEMORY_GLOBAL;
      handleATOM(i);
      handleCasExch(i, cctl);
   }
      break;
   case OP_SULDB:
   case OP_SULDP:
   case OP_SUSTB:
   case OP_SUSTP:
   case OP_SUREDB:
   case OP_SUREDP:
      if (targ->getChipset() >= NVISA_GK104_CHIPSET)
         handleSurfaceOpNVE4(i->asTex());
      break;
   default:
      break;
   }
   return true;
}

bool
TargetNVC0::runLegalizePass(Program *prog, CGStage stage) const
{
   if (stage == CG_STAGE_PRE_SSA) {
      NVC0LoweringPass pass(prog);
      return pass.run(prog, false, true);
   } else
   if (stage == CG_STAGE_POST_RA) {
      NVC0LegalizePostRA pass(prog);
      return pass.run(prog, false, true);
   } else
   if (stage == CG_STAGE_SSA) {
      NVC0LegalizeSSA pass;
      return pass.run(prog, false, true);
   }
   return false;
}

} // namespace nv50_ir