nv50,nvc0: Fix invalid constant.
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_lowering_nvc0.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_build_util.h"

#include "codegen/nv50_ir_target_nvc0.h"
#include "codegen/nv50_ir_lowering_nvc0.h"

#include <limits>

namespace nv50_ir {

#define QOP_ADD  0
#define QOP_SUBR 1
#define QOP_SUB  2
#define QOP_MOV2 3

//          UL UR LL LR
#define QUADOP(q, r, s, t)            \
   ((QOP_##q << 6) | (QOP_##r << 4) | \
    (QOP_##s << 2) | (QOP_##t << 0))
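
// Illustration: each lane gets a 2-bit opcode, UL in the highest bits down
// to LR in the lowest, so e.g. QUADOP(MOV2, ADD, MOV2, ADD)
// == (3 << 6) | (0 << 4) | (3 << 2) | (0 << 0) == 0xcc.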

void
NVC0LegalizeSSA::handleDIV(Instruction *i)
{
   FlowInstruction *call;
   int builtin;
   Value *def[2];

   bld.setPosition(i, false);
   def[0] = bld.mkMovToReg(0, i->getSrc(0))->getDef(0);
   def[1] = bld.mkMovToReg(1, i->getSrc(1))->getDef(0);
   switch (i->dType) {
   case TYPE_U32: builtin = NVC0_BUILTIN_DIV_U32; break;
   case TYPE_S32: builtin = NVC0_BUILTIN_DIV_S32; break;
   default:
      return;
   }
   call = bld.mkFlow(OP_CALL, NULL, CC_ALWAYS, NULL);
   bld.mkMov(i->getDef(0), def[(i->op == OP_DIV) ? 0 : 1]);
   bld.mkClobber(FILE_GPR, (i->op == OP_DIV) ? 0xe : 0xd, 2);
   bld.mkClobber(FILE_PREDICATE, (i->dType == TYPE_S32) ? 0xf : 0x3, 0);

   call->fixed = 1;
   call->absolute = call->builtin = 1;
   call->target.builtin = builtin;
   delete_Instruction(prog, i);
}

void
NVC0LegalizeSSA::handleRCPRSQ(Instruction *i)
{
   assert(i->dType == TYPE_F64);
   // There are instructions that will compute the high 32 bits of the 64-bit
   // float. We will just stick 0 in the bottom 32 bits.

   bld.setPosition(i, false);

   // 1. Take the source and split it up.
   Value *src[2], *dst[2], *def = i->getDef(0);
   bld.mkSplit(src, 4, i->getSrc(0));

   // 2. We don't care about the low 32 bits of the destination. Stick a 0 in.
   dst[0] = bld.loadImm(NULL, 0);
   dst[1] = bld.getSSA();

   // 3. The new version of the instruction takes the high 32 bits of the
   // source and outputs the high 32 bits of the destination.
   i->setSrc(0, src[1]);
   i->setDef(0, dst[1]);
   i->setType(TYPE_F32);
   i->subOp = NV50_IR_SUBOP_RCPRSQ_64H;

   // 4. Recombine the two dst pieces back into the original destination.
   bld.setPosition(i, true);
   bld.mkOp2(OP_MERGE, TYPE_U64, def, dst[0], dst[1]);
}

void
NVC0LegalizeSSA::handleFTZ(Instruction *i)
{
   // Only want to flush float inputs
   assert(i->sType == TYPE_F32);

   // If we're already flushing denorms (and NaNs) to zero, no need for this.
   if (i->dnz)
      return;

   // Only certain classes of operations can flush
   OpClass cls = prog->getTarget()->getOpClass(i->op);
   if (cls != OPCLASS_ARITH && cls != OPCLASS_COMPARE &&
       cls != OPCLASS_CONVERT)
      return;

   i->ftz = true;
}

bool
NVC0LegalizeSSA::visit(Function *fn)
{
   bld.setProgram(fn->getProgram());
   return true;
}

bool
NVC0LegalizeSSA::visit(BasicBlock *bb)
{
   Instruction *next;
   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->sType == TYPE_F32) {
         if (prog->getType() != Program::TYPE_COMPUTE)
            handleFTZ(i);
         continue;
      }
      switch (i->op) {
      case OP_DIV:
      case OP_MOD:
         handleDIV(i);
         break;
      case OP_RCP:
      case OP_RSQ:
         if (i->dType == TYPE_F64)
            handleRCPRSQ(i);
         break;
      default:
         break;
      }
   }
   return true;
}

NVC0LegalizePostRA::NVC0LegalizePostRA(const Program *prog)
   : rZero(NULL),
     carry(NULL),
     needTexBar(prog->getTarget()->getChipset() >= 0xe0)
{
}

bool
NVC0LegalizePostRA::insnDominatedBy(const Instruction *later,
                                    const Instruction *early) const
{
   if (early->bb == later->bb)
      return early->serial < later->serial;
   return later->bb->dominatedBy(early->bb);
}

void
NVC0LegalizePostRA::addTexUse(std::list<TexUse> &uses,
                              Instruction *usei, const Instruction *texi)
{
   bool add = true;
   for (std::list<TexUse>::iterator it = uses.begin();
        it != uses.end();) {
      if (insnDominatedBy(usei, it->insn)) {
         add = false;
         break;
      }
      if (insnDominatedBy(it->insn, usei))
         it = uses.erase(it);
      else
         ++it;
   }
   if (add)
      uses.push_back(TexUse(usei, texi));
}

// While it might be tempting to use an algorithm that just looks at tex
// uses, not all texture results are guaranteed to be used on all paths. In
// the case where along some control flow path a texture result is never used,
// we might reuse that register for something else, creating a
// write-after-write hazard. So we have to manually look through all
// instructions looking for ones that reference the registers in question.
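//
// For example (hypothetical post-RA snippet):
//   tex $r0 ...        // result lands in $r0
//   bra ELSE
//   mov $r0, $r1       // path that never reads the tex result, reuses $r0
// Only scanning reads would miss that the mov overwrites $r0 while the tex
// may still be in flight, so writes have to be treated as first uses too.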
void
NVC0LegalizePostRA::findFirstUses(
   Instruction *texi, std::list<TexUse> &uses)
{
   int minGPR = texi->def(0).rep()->reg.data.id;
   int maxGPR = minGPR + texi->def(0).rep()->reg.size / 4 - 1;

   unordered_set<const BasicBlock *> visited;
   findFirstUsesBB(minGPR, maxGPR, texi->next, texi, uses, visited);
}

void
NVC0LegalizePostRA::findFirstUsesBB(
   int minGPR, int maxGPR, Instruction *start,
   const Instruction *texi, std::list<TexUse> &uses,
   unordered_set<const BasicBlock *> &visited)
{
   const BasicBlock *bb = start->bb;

   // We don't process the whole bb the first time around. This is correct;
   // however, we might be in a loop and hit this BB again, and need to
   // process the full thing. So only mark a bb as visited if we processed it
   // from the beginning.
   if (start == bb->getEntry()) {
      if (visited.find(bb) != visited.end())
         return;
      visited.insert(bb);
   }

   for (Instruction *insn = start; insn != bb->getExit(); insn = insn->next) {
      if (insn->isNop())
         continue;

      for (int d = 0; insn->defExists(d); ++d) {
         if (insn->def(d).getFile() != FILE_GPR ||
             insn->def(d).rep()->reg.data.id < minGPR ||
             insn->def(d).rep()->reg.data.id > maxGPR)
            continue;
         addTexUse(uses, insn, texi);
         return;
      }

      for (int s = 0; insn->srcExists(s); ++s) {
         if (insn->src(s).getFile() != FILE_GPR ||
             insn->src(s).rep()->reg.data.id < minGPR ||
             insn->src(s).rep()->reg.data.id > maxGPR)
            continue;
         addTexUse(uses, insn, texi);
         return;
      }
   }

   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      findFirstUsesBB(minGPR, maxGPR, BasicBlock::get(ei.getNode())->getEntry(),
                      texi, uses, visited);
   }
}

// Texture barriers:
// This pass is a bit long and ugly and can probably be optimized.
//
// 1. obtain a list of TEXes and their outputs' first use(s)
// 2. calculate the barrier level of each first use (minimal number of TEXes,
//    over all paths, between the TEX and the use in question)
// 3. for each barrier, if all paths from the source TEX to that barrier
//    contain a barrier of lesser level, it can be culled
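//
// E.g. if two unrelated TEXes issue between a TEX and the first use of its
// result, that use gets level 2: a "texbar 2" only waits until at most two
// texture fetches are still outstanding, which is just enough for the
// result being read to have landed (informal reading of the level
// semantics above).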
bool
NVC0LegalizePostRA::insertTextureBarriers(Function *fn)
{
   std::list<TexUse> *uses;
   std::vector<Instruction *> texes;
   std::vector<int> bbFirstTex;
   std::vector<int> bbFirstUse;
   std::vector<int> texCounts;
   std::vector<TexUse> useVec;
   ArrayList insns;

   fn->orderInstructions(insns);

   texCounts.resize(fn->allBBlocks.getSize(), 0);
   bbFirstTex.resize(fn->allBBlocks.getSize(), insns.getSize());
   bbFirstUse.resize(fn->allBBlocks.getSize(), insns.getSize());

   // tag BB CFG nodes by their id for later
   for (ArrayList::Iterator i = fn->allBBlocks.iterator(); !i.end(); i.next()) {
      BasicBlock *bb = reinterpret_cast<BasicBlock *>(i.get());
      if (bb)
         bb->cfg.tag = bb->getId();
   }

   // gather the first uses for each TEX
   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *tex = reinterpret_cast<Instruction *>(insns.get(i));
      if (isTextureOp(tex->op)) {
         texes.push_back(tex);
         if (!texCounts.at(tex->bb->getId()))
            bbFirstTex[tex->bb->getId()] = texes.size() - 1;
         texCounts[tex->bb->getId()]++;
      }
   }
   insns.clear();
   if (texes.empty())
      return false;
   uses = new std::list<TexUse>[texes.size()];
   if (!uses)
      return false;
   for (size_t i = 0; i < texes.size(); ++i) {
      findFirstUses(texes[i], uses[i]);
   }

   // determine the barrier level at each use
   for (size_t i = 0; i < texes.size(); ++i) {
      for (std::list<TexUse>::iterator u = uses[i].begin(); u != uses[i].end();
           ++u) {
         BasicBlock *tb = texes[i]->bb;
         BasicBlock *ub = u->insn->bb;
         if (tb == ub) {
            u->level = 0;
            for (size_t j = i + 1; j < texes.size() &&
                    texes[j]->bb == tb && texes[j]->serial < u->insn->serial;
                 ++j)
               u->level++;
         } else {
            u->level = fn->cfg.findLightestPathWeight(&tb->cfg,
                                                      &ub->cfg, texCounts);
            if (u->level < 0) {
               WARN("Failed to find path TEX -> TEXBAR\n");
               u->level = 0;
               continue;
            }
            // this counted all TEXes in the origin block, correct that
            u->level -= i - bbFirstTex.at(tb->getId()) + 1 /* this TEX */;
            // and did not count the TEXes in the destination block, add those
            for (size_t j = bbFirstTex.at(ub->getId()); j < texes.size() &&
                    texes[j]->bb == ub && texes[j]->serial < u->insn->serial;
                 ++j)
               u->level++;
         }
         assert(u->level >= 0);
         useVec.push_back(*u);
      }
   }
   delete[] uses;

   // insert the barriers
   for (size_t i = 0; i < useVec.size(); ++i) {
      Instruction *prev = useVec[i].insn->prev;
      if (useVec[i].level < 0)
         continue;
      if (prev && prev->op == OP_TEXBAR) {
         if (prev->subOp > useVec[i].level)
            prev->subOp = useVec[i].level;
         prev->setSrc(prev->srcCount(), useVec[i].tex->getDef(0));
      } else {
         Instruction *bar = new_Instruction(func, OP_TEXBAR, TYPE_NONE);
         bar->fixed = 1;
         bar->subOp = useVec[i].level;
         // make use explicit to ease latency calculation
         bar->setSrc(bar->srcCount(), useVec[i].tex->getDef(0));
         useVec[i].insn->bb->insertBefore(useVec[i].insn, bar);
      }
   }

   if (fn->getProgram()->optLevel < 3)
      return true;

   std::vector<Limits> limitT, limitB, limitS; // entry, exit, single

   limitT.resize(fn->allBBlocks.getSize(), Limits(0, 0));
   limitB.resize(fn->allBBlocks.getSize(), Limits(0, 0));
   limitS.resize(fn->allBBlocks.getSize());

   // cull unneeded barriers (should do that earlier, but for simplicity)
   IteratorRef bi = fn->cfg.iteratorCFG();
   // first calculate min/max outstanding TEXes for each BB
   for (bi->reset(); !bi->end(); bi->next()) {
      Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
      BasicBlock *bb = BasicBlock::get(n);
      int min = 0;
      int max = std::numeric_limits<int>::max();
      for (Instruction *i = bb->getFirst(); i; i = i->next) {
         if (isTextureOp(i->op)) {
            min++;
            if (max < std::numeric_limits<int>::max())
               max++;
         } else
         if (i->op == OP_TEXBAR) {
            min = MIN2(min, i->subOp);
            max = MIN2(max, i->subOp);
         }
      }
      // limits when looking at an isolated block
      limitS[bb->getId()].min = min;
      limitS[bb->getId()].max = max;
   }
   // propagate the min/max values
   for (unsigned int l = 0; l <= fn->loopNestingBound; ++l) {
      for (bi->reset(); !bi->end(); bi->next()) {
         Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
         BasicBlock *bb = BasicBlock::get(n);
         const int bbId = bb->getId();
         for (Graph::EdgeIterator ei = n->incident(); !ei.end(); ei.next()) {
            BasicBlock *in = BasicBlock::get(ei.getNode());
            const int inId = in->getId();
            limitT[bbId].min = MAX2(limitT[bbId].min, limitB[inId].min);
            limitT[bbId].max = MAX2(limitT[bbId].max, limitB[inId].max);
         }
         // I just hope this is correct ...
         if (limitS[bbId].max == std::numeric_limits<int>::max()) {
            // no barrier
            limitB[bbId].min = limitT[bbId].min + limitS[bbId].min;
            limitB[bbId].max = limitT[bbId].max + limitS[bbId].min;
         } else {
            // block contained a barrier
            limitB[bbId].min = MIN2(limitS[bbId].max,
                                    limitT[bbId].min + limitS[bbId].min);
            limitB[bbId].max = MIN2(limitS[bbId].max,
                                    limitT[bbId].max + limitS[bbId].min);
         }
      }
   }
   // finally delete unnecessary barriers
   for (bi->reset(); !bi->end(); bi->next()) {
      Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
      BasicBlock *bb = BasicBlock::get(n);
      Instruction *prev = NULL;
      Instruction *next;
      int max = limitT[bb->getId()].max;
      for (Instruction *i = bb->getFirst(); i; i = next) {
         next = i->next;
         if (i->op == OP_TEXBAR) {
            if (i->subOp >= max) {
               delete_Instruction(prog, i);
               i = NULL;
            } else {
               max = i->subOp;
               if (prev && prev->op == OP_TEXBAR && prev->subOp >= max) {
                  delete_Instruction(prog, prev);
                  prev = NULL;
               }
            }
         } else
         if (isTextureOp(i->op)) {
            max++;
         }
         if (i && !i->isNop())
            prev = i;
      }
   }
   return true;
}

bool
NVC0LegalizePostRA::visit(Function *fn)
{
   if (needTexBar)
      insertTextureBarriers(fn);

   rZero = new_LValue(fn, FILE_GPR);
   carry = new_LValue(fn, FILE_FLAGS);

   rZero->reg.data.id = prog->getTarget()->getFileSize(FILE_GPR);
   carry->reg.data.id = 0;

   return true;
}

void
NVC0LegalizePostRA::replaceZero(Instruction *i)
{
   for (int s = 0; i->srcExists(s); ++s) {
      if (s == 2 && i->op == OP_SUCLAMP)
         continue;
      ImmediateValue *imm = i->getSrc(s)->asImm();
      if (imm && imm->reg.data.u64 == 0)
         i->setSrc(s, rZero);
   }
}

// replace CONT with BRA for single unconditional continue
bool
NVC0LegalizePostRA::tryReplaceContWithBra(BasicBlock *bb)
{
   if (bb->cfg.incidentCount() != 2 || bb->getEntry()->op != OP_PRECONT)
      return false;
   Graph::EdgeIterator ei = bb->cfg.incident();
   if (ei.getType() != Graph::Edge::BACK)
      ei.next();
   if (ei.getType() != Graph::Edge::BACK)
      return false;
   BasicBlock *contBB = BasicBlock::get(ei.getNode());

   if (!contBB->getExit() || contBB->getExit()->op != OP_CONT ||
       contBB->getExit()->getPredicate())
      return false;
   contBB->getExit()->op = OP_BRA;
   bb->remove(bb->getEntry()); // delete PRECONT

   ei.next();
   assert(ei.end() || ei.getType() != Graph::Edge::BACK);
   return true;
}

// replace branches to join blocks with join ops
void
NVC0LegalizePostRA::propagateJoin(BasicBlock *bb)
{
   if (bb->getEntry()->op != OP_JOIN || bb->getEntry()->asFlow()->limit)
      return;
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      BasicBlock *in = BasicBlock::get(ei.getNode());
      Instruction *exit = in->getExit();
      if (!exit) {
         in->insertTail(new FlowInstruction(func, OP_JOIN, bb));
         // there should always be a terminator instruction
         WARN("inserted missing terminator in BB:%i\n", in->getId());
      } else
      if (exit->op == OP_BRA) {
         exit->op = OP_JOIN;
         exit->asFlow()->limit = 1; // must-not-propagate marker
      }
   }
   bb->remove(bb->getEntry());
}

bool
NVC0LegalizePostRA::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   // remove pseudo operations and non-fixed no-ops, split 64 bit operations
   for (i = bb->getFirst(); i; i = next) {
      next = i->next;
      if (i->op == OP_EMIT || i->op == OP_RESTART) {
         if (!i->getDef(0)->refCount())
            i->setDef(0, NULL);
         if (i->src(0).getFile() == FILE_IMMEDIATE)
            i->setSrc(0, rZero); // initial value must be 0
         replaceZero(i);
      } else
      if (i->isNop()) {
         bb->remove(i);
      } else
      if (i->op == OP_BAR && i->subOp == NV50_IR_SUBOP_BAR_SYNC &&
          prog->getType() != Program::TYPE_COMPUTE) {
         // It seems like barriers are never required for tessellation since
         // the warp size is 32, and there are always at most 32 tcs threads.
         bb->remove(i);
      } else
      if (i->op == OP_LOAD && i->subOp == NV50_IR_SUBOP_LDC_IS) {
         int offset = i->src(0).get()->reg.data.offset;
         if (abs(offset) > 0x10000)
            i->src(0).get()->reg.fileIndex += offset >> 16;
         i->src(0).get()->reg.data.offset = (int)(short)offset;
      } else {
         // TODO: Move this to before register allocation for operations that
         // need the $c register !
         if (typeSizeof(i->dType) == 8) {
            Instruction *hi;
            hi = BuildUtil::split64BitOpPostRA(func, i, rZero, carry);
            if (hi)
               next = hi;
         }

         if (i->op != OP_MOV && i->op != OP_PFETCH)
            replaceZero(i);
      }
   }
   if (!bb->getEntry())
      return true;

   if (!tryReplaceContWithBra(bb))
      propagateJoin(bb);

   return true;
}

NVC0LoweringPass::NVC0LoweringPass(Program *prog) : targ(prog->getTarget())
{
   bld.setProgram(prog);
   gMemBase = NULL;
}

bool
NVC0LoweringPass::visit(Function *fn)
{
   if (prog->getType() == Program::TYPE_GEOMETRY) {
      assert(!strncmp(fn->getName(), "MAIN", 4));
      // TODO: when we generate actual functions pass this value along somehow
      bld.setPosition(BasicBlock::get(fn->cfg.getRoot()), false);
      gpEmitAddress = bld.loadImm(NULL, 0)->asLValue();
      if (fn->cfgExit) {
         bld.setPosition(BasicBlock::get(fn->cfgExit)->getExit(), false);
         bld.mkMovToReg(0, gpEmitAddress);
      }
   }
   return true;
}

bool
NVC0LoweringPass::visit(BasicBlock *bb)
{
   return true;
}

inline Value *
NVC0LoweringPass::loadTexHandle(Value *ptr, unsigned int slot)
{
   uint8_t b = prog->driver->io.resInfoCBSlot;
   uint32_t off = prog->driver->io.texBindBase + slot * 4;
   return bld.
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
}

// move array source to first slot, convert to u16, add indirections
bool
NVC0LoweringPass::handleTEX(TexInstruction *i)
{
   const int dim = i->tex.target.getDim() + i->tex.target.isCube();
   const int arg = i->tex.target.getArgCount();
   const int lyr = arg - (i->tex.target.isMS() ? 2 : 1);
   const int chipset = prog->getTarget()->getChipset();

   // Arguments to the TEX instruction are a little insane. Even though the
   // encoding is identical between SM20 and SM30, the arguments mean
   // different things between Fermi and Kepler+. A lot of arguments are
   // optional based on flags passed to the instruction. This summarizes the
   // order of things.
   //
   // Fermi:
   //  array/indirect
   //  coords
   //  sample
   //  lod bias
   //  depth compare
   //  offsets:
   //    - tg4: 8 bits each, either 2 (1 offset reg) or 8 (2 offset reg)
   //    - other: 4 bits each, single reg
   //
   // Kepler+:
   //  indirect handle
   //  array (+ offsets for txd in upper 16 bits)
   //  coords
   //  sample
   //  lod bias
   //  depth compare
   //  offsets (same as fermi, except txd which takes it with array)
   //
   // Maxwell (tex):
   //  array
   //  coords
   //  indirect handle
   //  sample
   //  lod bias
   //  depth compare
   //  offsets
   //
   // Maxwell (txd):
   //  indirect handle
   //  coords
   //  array + offsets
   //  derivatives

   if (chipset >= NVISA_GK104_CHIPSET) {
      if (i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
         // XXX this ignores tsc, and assumes a 1:1 mapping
         assert(i->tex.rIndirectSrc >= 0);
         Value *hnd = loadTexHandle(
               bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
                          i->getIndirectR(), bld.mkImm(2)),
               i->tex.r);
         i->tex.r = 0xff;
         i->tex.s = 0x1f;
         i->setIndirectR(hnd);
         i->setIndirectS(NULL);
      } else if (i->tex.r == i->tex.s || i->op == OP_TXF) {
         i->tex.r += prog->driver->io.texBindBase / 4;
         i->tex.s = 0; // only a single cX[] value possible here
      } else {
         Value *hnd = bld.getScratch();
         Value *rHnd = loadTexHandle(NULL, i->tex.r);
         Value *sHnd = loadTexHandle(NULL, i->tex.s);

         bld.mkOp3(OP_INSBF, TYPE_U32, hnd, rHnd, bld.mkImm(0x1400), sHnd);

         i->tex.r = 0; // not used for indirect tex
         i->tex.s = 0;
         i->setIndirectR(hnd);
      }
      if (i->tex.target.isArray()) {
         LValue *layer = new_LValue(func, FILE_GPR);
         Value *src = i->getSrc(lyr);
         const int sat = (i->op == OP_TXF) ? 1 : 0;
         DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
         bld.mkCvt(OP_CVT, TYPE_U16, layer, sTy, src)->saturate = sat;
         if (i->op != OP_TXD || chipset < NVISA_GM107_CHIPSET) {
            for (int s = dim; s >= 1; --s)
               i->setSrc(s, i->getSrc(s - 1));
            i->setSrc(0, layer);
         } else {
            i->setSrc(dim, layer);
         }
      }
      // Move the indirect reference to the first place
      if (i->tex.rIndirectSrc >= 0 && (
                i->op == OP_TXD || chipset < NVISA_GM107_CHIPSET)) {
         Value *hnd = i->getIndirectR();

         i->setIndirectR(NULL);
         i->moveSources(0, 1);
         i->setSrc(0, hnd);
         i->tex.rIndirectSrc = 0;
         i->tex.sIndirectSrc = -1;
      }
   } else
   // (nvc0) generate and move the tsc/tic/array source to the front
   if (i->tex.target.isArray() || i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
      LValue *src = new_LValue(func, FILE_GPR); // 0xttxsaaaa

      Value *ticRel = i->getIndirectR();
      Value *tscRel = i->getIndirectS();

      if (ticRel) {
         i->setSrc(i->tex.rIndirectSrc, NULL);
         if (i->tex.r)
            ticRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
                                ticRel, bld.mkImm(i->tex.r));
      }
      if (tscRel) {
         i->setSrc(i->tex.sIndirectSrc, NULL);
         if (i->tex.s)
            tscRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
                                tscRel, bld.mkImm(i->tex.s));
      }

      Value *arrayIndex = i->tex.target.isArray() ? i->getSrc(lyr) : NULL;
      for (int s = dim; s >= 1; --s)
         i->setSrc(s, i->getSrc(s - 1));
      i->setSrc(0, arrayIndex);

      if (arrayIndex) {
         int sat = (i->op == OP_TXF) ? 1 : 0;
         DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
         bld.mkCvt(OP_CVT, TYPE_U16, src, sTy, arrayIndex)->saturate = sat;
      } else {
         bld.loadImm(src, 0);
      }

      if (ticRel)
         bld.mkOp3(OP_INSBF, TYPE_U32, src, ticRel, bld.mkImm(0x0917), src);
      if (tscRel)
         bld.mkOp3(OP_INSBF, TYPE_U32, src, tscRel, bld.mkImm(0x0710), src);

      i->setSrc(0, src);
   }

   // For nvc0, the sample id has to be in the second operand, like the
   // offset. Right now we don't know how to pass both in, and this case
   // can't happen with OpenGL. On nve0, the sample id is part of the
   // texture coordinate argument.
   assert(chipset >= NVISA_GK104_CHIPSET ||
          !i->tex.useOffsets || !i->tex.target.isMS());

   // offset is between lod and dc
   if (i->tex.useOffsets) {
      int n, c;
      int s = i->srcCount(0xff, true);
      if (i->op != OP_TXD || chipset < NVISA_GK104_CHIPSET) {
         if (i->tex.target.isShadow())
            s--;
         if (i->srcExists(s)) // move potential predicate out of the way
            i->moveSources(s, 1);
         if (i->tex.useOffsets == 4 && i->srcExists(s + 1))
            i->moveSources(s + 1, 1);
      }
      if (i->op == OP_TXG) {
         // Either there is 1 offset, which goes into the 2 low bytes of the
         // first source, or there are 4 offsets, which go into 2 sources (8
         // values, 1 byte each).
         Value *offs[2] = {NULL, NULL};
         for (n = 0; n < i->tex.useOffsets; n++) {
            for (c = 0; c < 2; ++c) {
               if ((n % 2) == 0 && c == 0)
                  offs[n / 2] = i->offset[n][c].get();
               else
                  bld.mkOp3(OP_INSBF, TYPE_U32,
                            offs[n / 2],
                            i->offset[n][c].get(),
                            bld.mkImm(0x800 | ((n * 16 + c * 8) % 32)),
                            offs[n / 2]);
            }
         }
         i->setSrc(s, offs[0]);
         if (offs[1])
            i->setSrc(s + 1, offs[1]);
      } else {
         unsigned imm = 0;
         assert(i->tex.useOffsets == 1);
         for (c = 0; c < 3; ++c) {
            ImmediateValue val;
            if (!i->offset[0][c].getImmediate(val))
               assert(!"non-immediate offset passed to non-TXG");
            imm |= (val.reg.data.u32 & 0xf) << (c * 4);
         }
         if (i->op == OP_TXD && chipset >= NVISA_GK104_CHIPSET) {
            // The offset goes into the upper 16 bits of the array index. So
            // create it if it's not already there, and INSBF it if it already
            // is.
            s = (i->tex.rIndirectSrc >= 0) ? 1 : 0;
            if (chipset >= NVISA_GM107_CHIPSET)
               s += dim;
            if (i->tex.target.isArray()) {
               bld.mkOp3(OP_INSBF, TYPE_U32, i->getSrc(s),
                         bld.loadImm(NULL, imm), bld.mkImm(0xc10),
                         i->getSrc(s));
            } else {
               i->moveSources(s, 1);
               i->setSrc(s, bld.loadImm(NULL, imm << 16));
            }
         } else {
            i->setSrc(s, bld.loadImm(NULL, imm));
         }
      }
   }

   if (chipset >= NVISA_GK104_CHIPSET) {
      //
      // If TEX requires more than 4 sources, the 2nd register tuple must be
      // aligned to 4, even if it consists of just a single 4-byte register.
      //
      // XXX HACK: We insert 0 sources to avoid the 5 or 6 regs case.
      //
      int s = i->srcCount(0xff, true);
      if (s > 4 && s < 7) {
         if (i->srcExists(s)) // move potential predicate out of the way
            i->moveSources(s, 7 - s);
         while (s < 7)
            i->setSrc(s++, bld.loadImm(NULL, 0));
      }
   }

   return true;
}

bool
NVC0LoweringPass::handleManualTXD(TexInstruction *i)
{
   static const uint8_t qOps[4][2] =
   {
      { QUADOP(MOV2, ADD,  MOV2, ADD),  QUADOP(MOV2, MOV2, ADD,  ADD) }, // l0
      { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(MOV2, MOV2, ADD,  ADD) }, // l1
      { QUADOP(MOV2, ADD,  MOV2, ADD),  QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l2
      { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l3
   };
   Value *def[4][4];
   Value *crd[3];
   Instruction *tex;
   Value *zero = bld.loadImm(bld.getSSA(), 0);
   int l, c;
   const int dim = i->tex.target.getDim() + i->tex.target.isCube();
   const int array = i->tex.target.isArray();

   i->op = OP_TEX; // no need to clone dPdx/dPdy later

   for (c = 0; c < dim; ++c)
      crd[c] = bld.getScratch();

   bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
   for (l = 0; l < 4; ++l) {
      // mov coordinates from lane l to all lanes
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(0x00, crd[c], l, i->getSrc(c + array), zero);
      // add dPdx from lane l to lanes dx
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][0], crd[c], l, i->dPdx[c].get(), crd[c]);
      // add dPdy from lane l to lanes dy
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][1], crd[c], l, i->dPdy[c].get(), crd[c]);
      // texture
      bld.insert(tex = cloneForward(func, i));
      for (c = 0; c < dim; ++c)
         tex->setSrc(c + array, crd[c]);
      // save results
      for (c = 0; i->defExists(c); ++c) {
         Instruction *mov;
         def[c][l] = bld.getSSA();
         mov = bld.mkMov(def[c][l], tex->getDef(c));
         mov->fixed = 1;
         mov->lanes = 1 << l;
      }
   }
   bld.mkOp(OP_QUADPOP, TYPE_NONE, NULL);

   for (c = 0; i->defExists(c); ++c) {
      Instruction *u = bld.mkOp(OP_UNION, TYPE_U32, i->getDef(c));
      for (l = 0; l < 4; ++l)
         u->setSrc(l, def[c][l]);
   }

   i->bb->remove(i);
   return true;
}

bool
NVC0LoweringPass::handleTXD(TexInstruction *txd)
{
   int dim = txd->tex.target.getDim() + txd->tex.target.isCube();
   unsigned arg = txd->tex.target.getArgCount();
   unsigned expected_args = arg;
   const int chipset = prog->getTarget()->getChipset();

   if (chipset >= NVISA_GK104_CHIPSET) {
      if (!txd->tex.target.isArray() && txd->tex.useOffsets)
         expected_args++;
      if (txd->tex.rIndirectSrc >= 0 || txd->tex.sIndirectSrc >= 0)
         expected_args++;
   } else {
      if (txd->tex.useOffsets)
         expected_args++;
      if (!txd->tex.target.isArray() && (
                txd->tex.rIndirectSrc >= 0 || txd->tex.sIndirectSrc >= 0))
         expected_args++;
   }

   if (expected_args > 4 ||
       dim > 2 ||
       txd->tex.target.isShadow())
      txd->op = OP_TEX;

   handleTEX(txd);
   while (txd->srcExists(arg))
      ++arg;

   txd->tex.derivAll = true;
   if (txd->op == OP_TEX)
      return handleManualTXD(txd);

   assert(arg == expected_args);
   for (int c = 0; c < dim; ++c) {
      txd->setSrc(arg + c * 2 + 0, txd->dPdx[c]);
      txd->setSrc(arg + c * 2 + 1, txd->dPdy[c]);
      txd->dPdx[c].set(NULL);
      txd->dPdy[c].set(NULL);
   }
   return true;
}

bool
NVC0LoweringPass::handleTXQ(TexInstruction *txq)
{
   const int chipset = prog->getTarget()->getChipset();
   if (chipset >= NVISA_GK104_CHIPSET && txq->tex.rIndirectSrc < 0)
      txq->tex.r += prog->driver->io.texBindBase / 4;

   if (txq->tex.rIndirectSrc < 0)
      return true;

   Value *ticRel = txq->getIndirectR();

   txq->setIndirectS(NULL);
   txq->tex.sIndirectSrc = -1;

   assert(ticRel);

   if (chipset < NVISA_GK104_CHIPSET) {
      LValue *src = new_LValue(func, FILE_GPR); // 0xttxsaaaa

      txq->setSrc(txq->tex.rIndirectSrc, NULL);
      if (txq->tex.r)
         ticRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
                             ticRel, bld.mkImm(txq->tex.r));

      bld.mkOp2(OP_SHL, TYPE_U32, src, ticRel, bld.mkImm(0x17));

      txq->moveSources(0, 1);
      txq->setSrc(0, src);
   } else {
      Value *hnd = loadTexHandle(
            bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
                       txq->getIndirectR(), bld.mkImm(2)),
            txq->tex.r);
      txq->tex.r = 0xff;
      txq->tex.s = 0x1f;

      txq->setIndirectR(NULL);
      txq->moveSources(0, 1);
      txq->setSrc(0, hnd);
      txq->tex.rIndirectSrc = 0;
   }

   return true;
}

bool
NVC0LoweringPass::handleTXLQ(TexInstruction *i)
{
   /* The outputs are inverted compared to what the TGSI instruction
    * expects. Take that into account in the mask.
    */
   assert((i->tex.mask & ~3) == 0);
   if (i->tex.mask == 1)
      i->tex.mask = 2;
   else if (i->tex.mask == 2)
      i->tex.mask = 1;
   handleTEX(i);
   bld.setPosition(i, true);

   /* The returned values are not quite what we want:
    * (a) convert from s16/u16 to f32
    * (b) multiply by 1/256
    */
   for (int def = 0; def < 2; ++def) {
      if (!i->defExists(def))
         continue;
      enum DataType type = TYPE_S16;
      if (i->tex.mask == 2 || def > 0)
         type = TYPE_U16;
      bld.mkCvt(OP_CVT, TYPE_F32, i->getDef(def), type, i->getDef(def));
      bld.mkOp2(OP_MUL, TYPE_F32, i->getDef(def),
                i->getDef(def), bld.loadImm(NULL, 1.0f / 256));
   }
   if (i->tex.mask == 3) {
      LValue *t = new_LValue(func, FILE_GPR);
      bld.mkMov(t, i->getDef(0));
      bld.mkMov(i->getDef(0), i->getDef(1));
      bld.mkMov(i->getDef(1), t);
   }
   return true;
}

bool
NVC0LoweringPass::handleSUQ(Instruction *suq)
{
   suq->op = OP_MOV;
   suq->setSrc(0, loadResLength32(suq->getIndirect(0, 1),
                                  suq->getSrc(0)->reg.fileIndex * 16));
   suq->setIndirect(0, 0, NULL);
   suq->setIndirect(0, 1, NULL);
   return true;
}

void
NVC0LoweringPass::handleSharedATOM(Instruction *atom)
{
   assert(atom->src(0).getFile() == FILE_MEMORY_SHARED);

   BasicBlock *currBB = atom->bb;
   BasicBlock *tryLockAndSetBB = atom->bb->splitBefore(atom, false);
   BasicBlock *joinBB = atom->bb->splitAfter(atom);

   bld.setPosition(currBB, true);
   assert(!currBB->joinAt);
   currBB->joinAt = bld.mkFlow(OP_JOINAT, joinBB, CC_ALWAYS, NULL);

   bld.mkFlow(OP_BRA, tryLockAndSetBB, CC_ALWAYS, NULL);
   currBB->cfg.attach(&tryLockAndSetBB->cfg, Graph::Edge::TREE);

   bld.setPosition(tryLockAndSetBB, true);

   Instruction *ld =
      bld.mkLoad(TYPE_U32, atom->getDef(0),
                 bld.mkSymbol(FILE_MEMORY_SHARED, 0, TYPE_U32, 0), NULL);
   ld->setDef(1, bld.getSSA(1, FILE_PREDICATE));
   ld->subOp = NV50_IR_SUBOP_LOAD_LOCKED;

   Value *stVal;
   if (atom->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
      // Read the old value, and write the new one.
      stVal = atom->getSrc(1);
   } else if (atom->subOp == NV50_IR_SUBOP_ATOM_CAS) {
      CmpInstruction *set =
         bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
                   TYPE_U32, ld->getDef(0), atom->getSrc(1));
      set->setPredicate(CC_P, ld->getDef(1));

      Instruction *selp =
         bld.mkOp3(OP_SELP, TYPE_U32, bld.getSSA(), ld->getDef(0),
                   atom->getSrc(2), set->getDef(0));
      selp->src(2).mod = Modifier(NV50_IR_MOD_NOT);
      selp->setPredicate(CC_P, ld->getDef(1));

      stVal = selp->getDef(0);
   } else {
      operation op;

      switch (atom->subOp) {
      case NV50_IR_SUBOP_ATOM_ADD:
         op = OP_ADD;
         break;
      case NV50_IR_SUBOP_ATOM_AND:
         op = OP_AND;
         break;
      case NV50_IR_SUBOP_ATOM_OR:
         op = OP_OR;
         break;
      case NV50_IR_SUBOP_ATOM_XOR:
         op = OP_XOR;
         break;
      case NV50_IR_SUBOP_ATOM_MIN:
         op = OP_MIN;
         break;
      case NV50_IR_SUBOP_ATOM_MAX:
         op = OP_MAX;
         break;
      default:
         assert(0);
      }

      Instruction *i =
         bld.mkOp2(op, atom->dType, bld.getSSA(), ld->getDef(0),
                   atom->getSrc(1));
      i->setPredicate(CC_P, ld->getDef(1));

      stVal = i->getDef(0);
   }

   Instruction *st =
      bld.mkStore(OP_STORE, TYPE_U32,
                  bld.mkSymbol(FILE_MEMORY_SHARED, 0, TYPE_U32, 0),
                  NULL, stVal);
   st->setPredicate(CC_P, ld->getDef(1));
   st->subOp = NV50_IR_SUBOP_STORE_UNLOCKED;

   // Loop until the lock is acquired.
   bld.mkFlow(OP_BRA, tryLockAndSetBB, CC_NOT_P, ld->getDef(1));
   tryLockAndSetBB->cfg.attach(&tryLockAndSetBB->cfg, Graph::Edge::BACK);
   tryLockAndSetBB->cfg.attach(&joinBB->cfg, Graph::Edge::CROSS);
   bld.mkFlow(OP_BRA, joinBB, CC_ALWAYS, NULL);

   bld.remove(atom);

   bld.setPosition(joinBB, false);
   bld.mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
}
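
// Rough shape of the emitted lock loop (informal pseudocode of the above):
//
//   tryLockAndSet:
//      val, locked = load locked   s[addr]
//      if (locked)  newval = op(val, src)     // or the EXCH/CAS selection
//      if (locked)  store unlocked s[addr], newval
//      if (!locked) goto tryLockAndSet
//   join: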

bool
NVC0LoweringPass::handleATOM(Instruction *atom)
{
   SVSemantic sv;
   Value *ptr = atom->getIndirect(0, 0), *ind = atom->getIndirect(0, 1), *base;

   switch (atom->src(0).getFile()) {
   case FILE_MEMORY_LOCAL:
      sv = SV_LBASE;
      break;
   case FILE_MEMORY_SHARED:
      handleSharedATOM(atom);
      return true;
   default:
      assert(atom->src(0).getFile() == FILE_MEMORY_GLOBAL);
      base = loadResInfo64(ind, atom->getSrc(0)->reg.fileIndex * 16);
      assert(base->reg.size == 8);
      if (ptr)
         base = bld.mkOp2v(OP_ADD, TYPE_U64, base, base, ptr);
      assert(base->reg.size == 8);
      atom->setIndirect(0, 0, base);
      return true;
   }
   base =
      bld.mkOp1v(OP_RDSV, TYPE_U32, bld.getScratch(), bld.mkSysVal(sv, 0));

   atom->setSrc(0, cloneShallow(func, atom->getSrc(0)));
   atom->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
   if (ptr)
      base = bld.mkOp2v(OP_ADD, TYPE_U32, base, base, ptr);
   atom->setIndirect(0, 1, NULL);
   atom->setIndirect(0, 0, base);

   return true;
}

bool
NVC0LoweringPass::handleCasExch(Instruction *cas, bool needCctl)
{
   if (cas->src(0).getFile() == FILE_MEMORY_SHARED) {
      // ATOM_CAS and ATOM_EXCH are handled in handleSharedATOM().
      return false;
   }

   if (cas->subOp != NV50_IR_SUBOP_ATOM_CAS &&
       cas->subOp != NV50_IR_SUBOP_ATOM_EXCH)
      return false;
   bld.setPosition(cas, true);

   if (needCctl) {
      Instruction *cctl = bld.mkOp1(OP_CCTL, TYPE_NONE, NULL, cas->getSrc(0));
      cctl->setIndirect(0, 0, cas->getIndirect(0, 0));
      cctl->fixed = 1;
      cctl->subOp = NV50_IR_SUBOP_CCTL_IV;
      if (cas->isPredicated())
         cctl->setPredicate(cas->cc, cas->getPredicate());
   }

   if (cas->subOp == NV50_IR_SUBOP_ATOM_CAS) {
      // CAS is crazy. Its 2nd source is a double reg, and the 3rd source
      // should be set to the high part of the double reg or bad things will
      // happen elsewhere in the universe.
      // Also, it sometimes returns the new value instead of the old one
      // under mysterious circumstances.
      Value *dreg = bld.getSSA(8);
      bld.setPosition(cas, false);
      bld.mkOp2(OP_MERGE, TYPE_U64, dreg, cas->getSrc(1), cas->getSrc(2));
      cas->setSrc(1, dreg);
      cas->setSrc(2, dreg);
   }

   return true;
}

inline Value *
NVC0LoweringPass::loadResInfo32(Value *ptr, uint32_t off)
{
   uint8_t b = prog->driver->io.resInfoCBSlot;
   off += prog->driver->io.suInfoBase;
   return bld.
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
}

inline Value *
NVC0LoweringPass::loadResInfo64(Value *ptr, uint32_t off)
{
   uint8_t b = prog->driver->io.resInfoCBSlot;
   off += prog->driver->io.suInfoBase;

   if (ptr)
      ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getScratch(), ptr, bld.mkImm(4));

   return bld.
      mkLoadv(TYPE_U64, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U64, off), ptr);
}

inline Value *
NVC0LoweringPass::loadResLength32(Value *ptr, uint32_t off)
{
   uint8_t b = prog->driver->io.resInfoCBSlot;
   off += prog->driver->io.suInfoBase;

   if (ptr)
      ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getScratch(), ptr, bld.mkImm(4));

   return bld.
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U64, off + 8), ptr);
}

inline Value *
NVC0LoweringPass::loadMsInfo32(Value *ptr, uint32_t off)
{
   uint8_t b = prog->driver->io.msInfoCBSlot;
   off += prog->driver->io.msInfoBase;
   return bld.
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
}

/* On nvc0, surface info is obtained via the surface binding points passed
 * to the SULD/SUST instructions.
 * On nve4, surface info is stored in c[] and is used by various special
 * instructions, e.g. for clamping coordinates or generating an address.
 * They couldn't just have added an equivalent to TIC now, could they ?
 */
#define NVE4_SU_INFO_ADDR   0x00
#define NVE4_SU_INFO_FMT    0x04
#define NVE4_SU_INFO_DIM_X  0x08
#define NVE4_SU_INFO_PITCH  0x0c
#define NVE4_SU_INFO_DIM_Y  0x10
#define NVE4_SU_INFO_ARRAY  0x14
#define NVE4_SU_INFO_DIM_Z  0x18
#define NVE4_SU_INFO_UNK1C  0x1c
#define NVE4_SU_INFO_WIDTH  0x20
#define NVE4_SU_INFO_HEIGHT 0x24
#define NVE4_SU_INFO_DEPTH  0x28
#define NVE4_SU_INFO_TARGET 0x2c
#define NVE4_SU_INFO_CALL   0x30
#define NVE4_SU_INFO_RAW_X  0x34
#define NVE4_SU_INFO_MS_X   0x38
#define NVE4_SU_INFO_MS_Y   0x3c

#define NVE4_SU_INFO__STRIDE 0x40

#define NVE4_SU_INFO_DIM(i)  (0x08 + (i) * 8)
#define NVE4_SU_INFO_SIZE(i) (0x20 + (i) * 4)
#define NVE4_SU_INFO_MS(i)   (0x38 + (i) * 4)
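
// For instance, NVE4_SU_INFO_DIM(1) == 0x10 == NVE4_SU_INFO_DIM_Y and
// NVE4_SU_INFO_MS(1) == 0x3c == NVE4_SU_INFO_MS_Y; each surface's 0x40-byte
// record lives at tex.r * NVE4_SU_INFO__STRIDE in the info buffer.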

static inline uint16_t getSuClampSubOp(const TexInstruction *su, int c)
{
   switch (su->tex.target.getEnum()) {
   case TEX_TARGET_BUFFER:      return NV50_IR_SUBOP_SUCLAMP_PL(0, 1);
   case TEX_TARGET_RECT:        return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_1D:          return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_1D_ARRAY:    return (c == 1) ?
                                   NV50_IR_SUBOP_SUCLAMP_PL(0, 2) :
                                   NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_2D:          return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
   case TEX_TARGET_2D_MS:       return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
   case TEX_TARGET_2D_ARRAY:    return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_2D_MS_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_3D:          return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_CUBE:        return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_CUBE_ARRAY:  return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   default:
      assert(0);
      return 0;
   }
}

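// A multisampled fetch at (x, y, s) is rewritten below as a plain 2D fetch:
// x and y are scaled by the per-surface log2 sample counts from the su info
// table, then offset by the (dx, dy) position of sample s from the MS info
// table, i.e. roughly ((x << ms_x) + dx, (y << ms_y) + dy).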
void
NVC0LoweringPass::adjustCoordinatesMS(TexInstruction *tex)
{
   const uint16_t base = tex->tex.r * NVE4_SU_INFO__STRIDE;
   const int arg = tex->tex.target.getArgCount();

   if (tex->tex.target == TEX_TARGET_2D_MS)
      tex->tex.target = TEX_TARGET_2D;
   else
   if (tex->tex.target == TEX_TARGET_2D_MS_ARRAY)
      tex->tex.target = TEX_TARGET_2D_ARRAY;
   else
      return;

   Value *x = tex->getSrc(0);
   Value *y = tex->getSrc(1);
   Value *s = tex->getSrc(arg - 1);

   Value *tx = bld.getSSA(), *ty = bld.getSSA(), *ts = bld.getSSA();

   Value *ms_x = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(0));
   Value *ms_y = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(1));

   bld.mkOp2(OP_SHL, TYPE_U32, tx, x, ms_x);
   bld.mkOp2(OP_SHL, TYPE_U32, ty, y, ms_y);

   s = bld.mkOp2v(OP_AND, TYPE_U32, ts, s, bld.loadImm(NULL, 0x7));
   s = bld.mkOp2v(OP_SHL, TYPE_U32, ts, ts, bld.mkImm(3));

   Value *dx = loadMsInfo32(ts, 0x0);
   Value *dy = loadMsInfo32(ts, 0x4);

   bld.mkOp2(OP_ADD, TYPE_U32, tx, tx, dx);
   bld.mkOp2(OP_ADD, TYPE_U32, ty, ty, dy);

   tex->setSrc(0, tx);
   tex->setSrc(1, ty);
   tex->moveSources(arg, -1);
}

// Sets 64-bit "generic address", predicate and format sources for SULD/SUST.
// They're computed from the coordinates using the surface info in c[] space.
void
NVC0LoweringPass::processSurfaceCoordsNVE4(TexInstruction *su)
{
   Instruction *insn;
   const bool atom = su->op == OP_SUREDB || su->op == OP_SUREDP;
   const bool raw =
      su->op == OP_SULDB || su->op == OP_SUSTB || su->op == OP_SUREDB;
   const int idx = su->tex.r;
   const int dim = su->tex.target.getDim();
   const int arg = dim + (su->tex.target.isArray() ? 1 : 0);
   const uint16_t base = idx * NVE4_SU_INFO__STRIDE;
   int c;
   Value *zero = bld.mkImm(0);
   Value *p1 = NULL;
   Value *v;
   Value *src[3];
   Value *bf, *eau, *off;
   Value *addr, *pred;

   off = bld.getScratch(4);
   bf = bld.getScratch(4);
   addr = bld.getSSA(8);
   pred = bld.getScratch(1, FILE_PREDICATE);

   bld.setPosition(su, false);

   adjustCoordinatesMS(su);

   // calculate clamped coordinates
   for (c = 0; c < arg; ++c) {
      src[c] = bld.getScratch();
      if (c == 0 && raw)
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_RAW_X);
      else
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_DIM(c));
      bld.mkOp3(OP_SUCLAMP, TYPE_S32, src[c], su->getSrc(c), v, zero)
         ->subOp = getSuClampSubOp(su, c);
   }
   for (; c < 3; ++c)
      src[c] = zero;

   // set predicate output
   if (su->tex.target == TEX_TARGET_BUFFER) {
      src[0]->getInsn()->setFlagsDef(1, pred);
   } else
   if (su->tex.target.isArray()) {
      p1 = bld.getSSA(1, FILE_PREDICATE);
      src[dim]->getInsn()->setFlagsDef(1, p1);
   }

   // calculate pixel offset
   if (dim == 1) {
      if (su->tex.target != TEX_TARGET_BUFFER)
         bld.mkOp2(OP_AND, TYPE_U32, off, src[0], bld.loadImm(NULL, 0xffff));
   } else
   if (dim == 3) {
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, src[2], v, src[1])
         ->subOp = NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l

      v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, off, v, src[0])
         ->subOp = NV50_IR_SUBOP_MADSP(0,2,8); // u32 u16l u16l
   } else {
      assert(dim == 2);
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, src[1], v, src[0])
         ->subOp = su->tex.target.isArray() ?
         NV50_IR_SUBOP_MADSP_SD : NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l
   }
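   // Informally: the 2D case computes off = y * pitch + x with packed u16
   // MADSP arithmetic; the 3D case folds the Z slice into the pitch term
   // first (rough summary of the MADSP chains above).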

   // calculate effective address part 1
   if (su->tex.target == TEX_TARGET_BUFFER) {
      if (raw) {
         bf = src[0];
      } else {
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);
         bld.mkOp3(OP_VSHL, TYPE_U32, bf, src[0], v, zero)
            ->subOp = NV50_IR_SUBOP_V1(7,6,8|2);
      }
   } else {
      Value *y = src[1];
      Value *z = src[2];
      uint16_t subOp = 0;

      switch (dim) {
      case 1:
         y = zero;
         z = zero;
         break;
      case 2:
         z = off;
         if (!su->tex.target.isArray()) {
            z = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
            subOp = NV50_IR_SUBOP_SUBFM_3D;
         }
         break;
      default:
         subOp = NV50_IR_SUBOP_SUBFM_3D;
         assert(dim == 3);
         break;
      }
      insn = bld.mkOp3(OP_SUBFM, TYPE_U32, bf, src[0], y, z);
      insn->subOp = subOp;
      insn->setFlagsDef(1, pred);
   }

   // part 2
   v = loadResInfo32(NULL, base + NVE4_SU_INFO_ADDR);

   if (su->tex.target == TEX_TARGET_BUFFER) {
      eau = v;
   } else {
      eau = bld.mkOp3v(OP_SUEAU, TYPE_U32, bld.getScratch(4), off, bf, v);
   }
   // add array layer offset
   if (su->tex.target.isArray()) {
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_ARRAY);
      if (dim == 1)
         bld.mkOp3(OP_MADSP, TYPE_U32, eau, src[1], v, eau)
            ->subOp = NV50_IR_SUBOP_MADSP(4,0,0); // u16 u24 u32
      else
         bld.mkOp3(OP_MADSP, TYPE_U32, eau, v, src[2], eau)
            ->subOp = NV50_IR_SUBOP_MADSP(0,0,0); // u32 u24 u32
      // combine predicates
      assert(p1);
      bld.mkOp2(OP_OR, TYPE_U8, pred, pred, p1);
   }

   if (atom) {
      Value *lo = bf;
      if (su->tex.target == TEX_TARGET_BUFFER) {
         lo = zero;
         bld.mkMov(off, bf);
      }
      //  bf == g[] address & 0xff
      // eau == g[] address >> 8
      bld.mkOp3(OP_PERMT, TYPE_U32,  bf,   lo, bld.loadImm(NULL, 0x6540), eau);
      bld.mkOp3(OP_PERMT, TYPE_U32, eau, zero, bld.loadImm(NULL, 0x0007), eau);
   } else
   if (su->op == OP_SULDP && su->tex.target == TEX_TARGET_BUFFER) {
      // Convert from u32 to u8 address format, which is what the library code
      // doing SULDP currently uses.
      // XXX: can SUEAU do this ?
      // XXX: does it matter that we don't mask high bytes in bf ?
      // Grrr.
      bld.mkOp2(OP_SHR, TYPE_U32, off, bf, bld.mkImm(8));
      bld.mkOp2(OP_ADD, TYPE_U32, eau, eau, off);
   }

   bld.mkOp2(OP_MERGE, TYPE_U64, addr, bf, eau);

   if (atom && su->tex.target == TEX_TARGET_BUFFER)
      bld.mkOp2(OP_ADD, TYPE_U64, addr, addr, off);

   // let's just set it 0 for raw access and hope it works
   v = raw ?
      bld.mkImm(0) : loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);

   // get rid of old coordinate sources, make space for fmt info and predicate
   su->moveSources(arg, 3 - arg);
   // set 64 bit address and 32-bit format sources
   su->setSrc(0, addr);
   su->setSrc(1, v);
   su->setSrc(2, pred);
}

void
NVC0LoweringPass::handleSurfaceOpNVE4(TexInstruction *su)
{
   processSurfaceCoordsNVE4(su);

   // Who do we hate more ? The person who decided that nvc0's SULD doesn't
   // have to support conversion or the person who decided that, in OpenCL,
   // you don't have to specify the format here like you do in OpenGL ?

   if (su->op == OP_SULDP) {
      // We don't patch shaders. Ever.
      // You get an indirect call to our library blob here.
      // But at least it's uniform.
      FlowInstruction *call;
      LValue *p[3];
      LValue *r[5];
      uint16_t base = su->tex.r * NVE4_SU_INFO__STRIDE + NVE4_SU_INFO_CALL;

      for (int i = 0; i < 4; ++i)
         (r[i] = bld.getScratch(4, FILE_GPR))->reg.data.id = i;
      for (int i = 0; i < 3; ++i)
         (p[i] = bld.getScratch(1, FILE_PREDICATE))->reg.data.id = i;
      (r[4] = bld.getScratch(8, FILE_GPR))->reg.data.id = 4;

      bld.mkMov(p[1], bld.mkImm((su->cache == CACHE_CA) ? 1 : 0), TYPE_U8);
      bld.mkMov(p[2], bld.mkImm((su->cache == CACHE_CG) ? 1 : 0), TYPE_U8);
      bld.mkMov(p[0], su->getSrc(2), TYPE_U8);
      bld.mkMov(r[4], su->getSrc(0), TYPE_U64);
      bld.mkMov(r[2], su->getSrc(1), TYPE_U32);

      call = bld.mkFlow(OP_CALL, NULL, su->cc, su->getPredicate());

      call->indirect = 1;
      call->absolute = 1;
      call->setSrc(0, bld.mkSymbol(FILE_MEMORY_CONST,
                                   prog->driver->io.resInfoCBSlot, TYPE_U32,
                                   prog->driver->io.suInfoBase + base));
      call->setSrc(1, r[2]);
      call->setSrc(2, r[4]);
      for (int i = 0; i < 3; ++i)
         call->setSrc(3 + i, p[i]);
      for (int i = 0; i < 4; ++i) {
         call->setDef(i, r[i]);
         bld.mkMov(su->getDef(i), r[i]);
      }
      call->setDef(4, p[1]);
      delete_Instruction(bld.getProgram(), su);
   }

   if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
      // FIXME: for out of bounds access, destination value will be undefined !
      Value *pred = su->getSrc(2);
      CondCode cc = CC_NOT_P;
      if (su->getPredicate()) {
         pred = bld.getScratch(1, FILE_PREDICATE);
         cc = su->cc;
         if (cc == CC_NOT_P) {
            bld.mkOp2(OP_OR, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
         } else {
            bld.mkOp2(OP_AND, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
            pred->getInsn()->src(1).mod = Modifier(NV50_IR_MOD_NOT);
         }
      }
      Instruction *red = bld.mkOp(OP_ATOM, su->dType, su->getDef(0));
      red->subOp = su->subOp;
      if (!gMemBase)
         gMemBase = bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0);
      red->setSrc(0, gMemBase);
      red->setSrc(1, su->getSrc(3));
      if (su->subOp == NV50_IR_SUBOP_ATOM_CAS)
         red->setSrc(2, su->getSrc(4));
      red->setIndirect(0, 0, su->getSrc(0));
      red->setPredicate(cc, pred);
      delete_Instruction(bld.getProgram(), su);
      handleCasExch(red, true);
   } else {
      su->sType = (su->tex.target == TEX_TARGET_BUFFER) ? TYPE_U32 : TYPE_U8;
   }
}

bool
NVC0LoweringPass::handleWRSV(Instruction *i)
{
   Instruction *st;
   Symbol *sym;
   uint32_t addr;

   // must replace, $sreg are not writeable
   addr = targ->getSVAddress(FILE_SHADER_OUTPUT, i->getSrc(0)->asSym());
   if (addr >= 0x400)
      return false;
   sym = bld.mkSymbol(FILE_SHADER_OUTPUT, 0, i->sType, addr);

   st = bld.mkStore(OP_EXPORT, i->dType, sym, i->getIndirect(0, 0),
                    i->getSrc(1));
   st->perPatch = i->perPatch;

   bld.getBB()->remove(i);
   return true;
}

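// Tess coords aren't regular inputs here: u and v are fetched from shader
// outputs 0x2f0/0x2f4 (indexed by lane id), and the third coordinate is
// reconstructed as 1 - u - v.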
void
NVC0LoweringPass::readTessCoord(LValue *dst, int c)
{
   Value *laneid = bld.getSSA();
   Value *x, *y;

   bld.mkOp1(OP_RDSV, TYPE_U32, laneid, bld.mkSysVal(SV_LANEID, 0));

   if (c == 0) {
      x = dst;
      y = NULL;
   } else
   if (c == 1) {
      x = NULL;
      y = dst;
   } else {
      assert(c == 2);
      x = bld.getSSA();
      y = bld.getSSA();
   }
   if (x)
      bld.mkFetch(x, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f0, NULL, laneid);
   if (y)
      bld.mkFetch(y, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f4, NULL, laneid);

   if (c == 2) {
      bld.mkOp2(OP_ADD, TYPE_F32, dst, x, y);
      bld.mkOp2(OP_SUB, TYPE_F32, dst, bld.loadImm(NULL, 1.0f), dst);
   }
}

bool
NVC0LoweringPass::handleRDSV(Instruction *i)
{
   Symbol *sym = i->getSrc(0)->asSym();
   const SVSemantic sv = sym->reg.data.sv.sv;
   Value *vtx = NULL;
   Instruction *ld;
   uint32_t addr = targ->getSVAddress(FILE_SHADER_INPUT, sym);

   if (addr >= 0x400) {
      // mov $sreg
      if (sym->reg.data.sv.index == 3) {
         // TGSI backend may use 4th component of TID,NTID,CTAID,NCTAID
         i->op = OP_MOV;
         i->setSrc(0, bld.mkImm((sv == SV_NTID || sv == SV_NCTAID) ? 1 : 0));
      }
      if (sv == SV_VERTEX_COUNT) {
         bld.setPosition(i, true);
         bld.mkOp2(OP_EXTBF, TYPE_U32, i->getDef(0), i->getDef(0),
                   bld.mkImm(0x808));
      }
      return true;
   }

   switch (sv) {
   case SV_POSITION:
      assert(prog->getType() == Program::TYPE_FRAGMENT);
      if (i->srcExists(1)) {
         // Pass offset through to the interpolation logic
         ld = bld.mkInterp(NV50_IR_INTERP_LINEAR | NV50_IR_INTERP_OFFSET,
                           i->getDef(0), addr, NULL);
         ld->setSrc(1, i->getSrc(1));
      } else {
         bld.mkInterp(NV50_IR_INTERP_LINEAR, i->getDef(0), addr, NULL);
      }
      break;
   case SV_FACE:
   {
      Value *face = i->getDef(0);
      bld.mkInterp(NV50_IR_INTERP_FLAT, face, addr, NULL);
      if (i->dType == TYPE_F32) {
         bld.mkOp2(OP_OR, TYPE_U32, face, face, bld.mkImm(0x00000001));
         bld.mkOp1(OP_NEG, TYPE_S32, face, face);
         bld.mkCvt(OP_CVT, TYPE_F32, face, TYPE_S32, face);
      }
   }
      break;
   case SV_TESS_COORD:
      assert(prog->getType() == Program::TYPE_TESSELLATION_EVAL);
      readTessCoord(i->getDef(0)->asLValue(), i->getSrc(0)->reg.data.sv.index);
      break;
   case SV_NTID:
   case SV_NCTAID:
   case SV_GRIDID:
      assert(targ->getChipset() >= NVISA_GK104_CHIPSET); // mov $sreg otherwise
      if (sym->reg.data.sv.index == 3) {
         i->op = OP_MOV;
         i->setSrc(0, bld.mkImm(sv == SV_GRIDID ? 0 : 1));
         return true;
      }
      addr += prog->driver->prop.cp.gridInfoBase;
      bld.mkLoad(TYPE_U32, i->getDef(0),
                 bld.mkSymbol(FILE_MEMORY_CONST, 0, TYPE_U32, addr), NULL);
      break;
   case SV_SAMPLE_INDEX:
      // TODO: Properly pass source as an address in the PIX address space
      // (which can be of the form [r0+offset]). But this is currently
      // unnecessary.
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
      break;
   case SV_SAMPLE_POS: {
      Value *off = new_LValue(func, FILE_GPR);
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
      bld.mkOp2(OP_SHL, TYPE_U32, off, i->getDef(0), bld.mkImm(3));
      bld.mkLoad(TYPE_F32,
                 i->getDef(0),
                 bld.mkSymbol(
                       FILE_MEMORY_CONST, prog->driver->io.resInfoCBSlot,
                       TYPE_U32, prog->driver->io.sampleInfoBase +
                       4 * sym->reg.data.sv.index),
                 off);
      break;
   }
   case SV_SAMPLE_MASK:
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_COVMASK;
      break;
   case SV_BASEVERTEX:
   case SV_BASEINSTANCE:
   case SV_DRAWID:
      ld = bld.mkLoad(TYPE_U32, i->getDef(0),
                      bld.mkSymbol(FILE_MEMORY_CONST,
                                   prog->driver->io.auxCBSlot,
                                   TYPE_U32,
                                   prog->driver->io.drawInfoBase +
                                   4 * (sv - SV_BASEVERTEX)),
                      NULL);
      break;
   default:
      if (prog->getType() == Program::TYPE_TESSELLATION_EVAL && !i->perPatch)
         vtx = bld.mkOp1v(OP_PFETCH, TYPE_U32, bld.getSSA(), bld.mkImm(0));
      ld = bld.mkFetch(i->getDef(0), i->dType,
                       FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
      ld->perPatch = i->perPatch;
      break;
   }
   bld.getBB()->remove(i);
   return true;
}

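// Floating-point division is lowered to multiplication by the reciprocal:
// a / b -> a * RCP(b). Integer DIV/MOD were already turned into builtin
// calls by NVC0LegalizeSSA.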
bool
NVC0LoweringPass::handleDIV(Instruction *i)
{
   if (!isFloatType(i->dType))
      return true;
   bld.setPosition(i, false);
   Instruction *rcp = bld.mkOp1(OP_RCP, i->dType,
                                bld.getSSA(typeSizeof(i->dType)), i->getSrc(1));
   i->op = OP_MUL;
   i->setSrc(1, rcp->getDef(0));
   return true;
}

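// Floating-point MOD is expanded as a - b * TRUNC(a * RCP(b)).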
bool
NVC0LoweringPass::handleMOD(Instruction *i)
{
   if (!isFloatType(i->dType))
      return true;
   LValue *value = bld.getScratch(typeSizeof(i->dType));
   bld.mkOp1(OP_RCP, i->dType, value, i->getSrc(1));
   bld.mkOp2(OP_MUL, i->dType, value, i->getSrc(0), value);
   bld.mkOp1(OP_TRUNC, i->dType, value, value);
   bld.mkOp2(OP_MUL, i->dType, value, i->getSrc(1), value);
   i->op = OP_SUB;
   i->setSrc(1, value);
   return true;
}

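// SQRT has no direct op here: f32 sqrt(x) becomes RCP(RSQ(x)), and f64 is
// approximated as x * RSQ(x), with a select forcing the result toward 0
// for x <= 0.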
1778 bool
1779 NVC0LoweringPass::handleSQRT(Instruction *i)
1780 {
1781 if (i->dType == TYPE_F64) {
1782 Value *pred = bld.getSSA(1, FILE_PREDICATE);
1783 Value *zero = bld.loadImm(NULL, 0);
1784 Value *dst = bld.getSSA(8);
1785 bld.mkOp1(OP_RSQ, i->dType, dst, i->getSrc(0));
1786 bld.mkCmp(OP_SET, CC_LE, i->dType, pred, i->dType, i->getSrc(0), zero);
1787 bld.mkOp3(OP_SELP, TYPE_U64, dst, zero, dst, pred);
1788 i->op = OP_MUL;
1789 i->setSrc(1, dst);
1790 // TODO: Handle this properly with a library function
1791 } else {
1792 bld.setPosition(i, true);
1793 i->op = OP_RSQ;
1794 bld.mkOp1(OP_RCP, i->dType, i->getDef(0), i->getDef(0));
1795 }
1796
1797 return true;
1798 }
1799
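// Lower POW via base-2 exponentials:
//   pow(x, y) = exp2(y * log2(x))
// with i itself becoming the final EX2 (fed through PREEX2).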
1800 bool
1801 NVC0LoweringPass::handlePOW(Instruction *i)
1802 {
1803 LValue *val = bld.getScratch();
1804
1805 bld.mkOp1(OP_LG2, TYPE_F32, val, i->getSrc(0));
1806 bld.mkOp2(OP_MUL, TYPE_F32, val, i->getSrc(1), val)->dnz = 1;
1807 bld.mkOp1(OP_PREEX2, TYPE_F32, val, val);
1808
1809 i->op = OP_EX2;
1810 i->setSrc(0, val);
1811 i->setSrc(1, NULL);
1812
1813 return true;
1814 }
1815
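// Fragment shader outputs have no output register file: the export turns
// into a final MOV whose def is pinned to the fixed GPR id derived from the
// output offset. Geometry shader exports go through gpEmitAddress instead.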
1816 bool
1817 NVC0LoweringPass::handleEXPORT(Instruction *i)
1818 {
1819 if (prog->getType() == Program::TYPE_FRAGMENT) {
1820 int id = i->getSrc(0)->reg.data.offset / 4;
1821
1822 if (i->src(0).isIndirect(0)) // TODO: this is ugly; indirect outputs are not handled yet
1823 return false;
1824 i->op = OP_MOV;
1825 i->subOp = NV50_IR_SUBOP_MOV_FINAL;
1826 i->src(0).set(i->src(1));
1827 i->setSrc(1, NULL);
1828 i->setDef(0, new_LValue(func, FILE_GPR));
1829 i->getDef(0)->reg.data.id = id;
1830
1831 prog->maxGPR = MAX2(prog->maxGPR, id);
1832 } else
1833 if (prog->getType() == Program::TYPE_GEOMETRY) {
1834 i->setIndirect(0, 1, gpEmitAddress);
1835 }
1836 return true;
1837 }
1838
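// Lower EMIT/RESTART. A RESTART immediately following an EMIT on the same
// stream is folded into it as EMIT_RESTART; otherwise the instruction is
// rewritten to update gpEmitAddress in place.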
1839 bool
1840 NVC0LoweringPass::handleOUT(Instruction *i)
1841 {
1842 Instruction *prev = i->prev;
1843 ImmediateValue stream, prevStream;
1844
1845 // Only merge if the stream ids match. Also, note that the previous
1846 // instruction would have already been lowered, so we take arg1 from it.
1847 if (i->op == OP_RESTART && prev && prev->op == OP_EMIT &&
1848 i->src(0).getImmediate(stream) &&
1849 prev->src(1).getImmediate(prevStream) &&
1850 stream.reg.data.u32 == prevStream.reg.data.u32) {
1851 i->prev->subOp = NV50_IR_SUBOP_EMIT_RESTART;
1852 delete_Instruction(prog, i);
1853 } else {
1854 assert(gpEmitAddress);
1855 i->setDef(0, gpEmitAddress);
1856 i->setSrc(1, i->getSrc(0));
1857 i->setSrc(0, gpEmitAddress);
1858 }
1859 return true;
1860 }
1861
1862 // Generate a binary predicate if an instruction is predicated by a value
1863 // in a non-predicate file, e.g. an f32 condition.
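//
// Roughly (illustrative syntax): "($r0) insn" becomes
//   "set $p0, NEU, 0.0, $r0;  ($p0) insn".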
1864 void
1865 NVC0LoweringPass::checkPredicate(Instruction *insn)
1866 {
1867 Value *pred = insn->getPredicate();
1868 Value *pdst;
1869
1870 if (!pred || pred->reg.file == FILE_PREDICATE)
1871 return;
1872 pdst = new_LValue(func, FILE_PREDICATE);
1873
1874 // CAUTION: don't use pdst->getInsn; the definition might not be unique.
1875 // Turning PSET(FSET(x,y),0) into PSET(x,y) is delayed to a later pass.
1876
1877 bld.mkCmp(OP_SET, CC_NEU, insn->dType, pdst, insn->dType, bld.mkImm(0), pred);
1878
1879 insn->setPredicate(insn->cc, pdst);
1880 }
1881
1882 //
1883 // - add quadop dance for texturing
1884 // - put FP outputs in GPRs
1885 // - convert unsupported instructions into sequences the hardware supports
1886 //
1887 bool
1888 NVC0LoweringPass::visit(Instruction *i)
1889 {
1890 bool ret = true;
1891 bld.setPosition(i, false);
1892
1893 if (i->cc != CC_ALWAYS)
1894 checkPredicate(i);
1895
1896 switch (i->op) {
1897 case OP_TEX:
1898 case OP_TXB:
1899 case OP_TXL:
1900 case OP_TXF:
1901 case OP_TXG:
1902 return handleTEX(i->asTex());
1903 case OP_TXD:
1904 return handleTXD(i->asTex());
1905 case OP_TXLQ:
1906 return handleTXLQ(i->asTex());
1907 case OP_TXQ:
1908 return handleTXQ(i->asTex());
1909 case OP_EX2:
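// EX2 needs its argument preprocessed by PREEX2; insert it and feed its
// result back into the EX2.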
1910 bld.mkOp1(OP_PREEX2, TYPE_F32, i->getDef(0), i->getSrc(0));
1911 i->setSrc(0, i->getDef(0));
1912 break;
1913 case OP_POW:
1914 return handlePOW(i);
1915 case OP_DIV:
1916 return handleDIV(i);
1917 case OP_MOD:
1918 return handleMOD(i);
1919 case OP_SQRT:
1920 return handleSQRT(i);
1921 case OP_EXPORT:
1922 ret = handleEXPORT(i);
1923 break;
1924 case OP_EMIT:
1925 case OP_RESTART:
1926 return handleOUT(i);
1927 case OP_RDSV:
1928 return handleRDSV(i);
1929 case OP_WRSV:
1930 return handleWRSV(i);
1931 case OP_STORE:
1932 case OP_LOAD:
1933 if (i->src(0).getFile() == FILE_SHADER_INPUT) {
1934 if (prog->getType() == Program::TYPE_COMPUTE) {
1935 i->getSrc(0)->reg.file = FILE_MEMORY_CONST;
1936 i->getSrc(0)->reg.fileIndex = 0;
1937 } else
1938 if (prog->getType() == Program::TYPE_GEOMETRY &&
1939 i->src(0).isIndirect(0)) {
1940 // XXX: this assumes vec4 units
1941 Value *ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
1942 i->getIndirect(0, 0), bld.mkImm(4));
1943 i->setIndirect(0, 0, ptr);
1944 i->op = OP_VFETCH;
1945 } else {
1946 i->op = OP_VFETCH;
1947 assert(prog->getType() != Program::TYPE_FRAGMENT); // INTERP
1948 }
1949 } else if (i->src(0).getFile() == FILE_MEMORY_CONST) {
1950 if (i->src(0).isIndirect(1)) {
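// Indirect constbuf index: pack the buffer index into bits 16..31 of the
// pointer (the byte offset stays in the low 16 bits) so a single indexed
// LDC (subOp LDC_IS) can be used.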
1951 Value *ptr;
1952 if (i->src(0).isIndirect(0))
1953 ptr = bld.mkOp3v(OP_INSBF, TYPE_U32, bld.getSSA(),
1954 i->getIndirect(0, 1), bld.mkImm(0x1010),
1955 i->getIndirect(0, 0));
1956 else
1957 ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
1958 i->getIndirect(0, 1), bld.mkImm(16));
1959 i->setIndirect(0, 1, NULL);
1960 i->setIndirect(0, 0, ptr);
1961 i->subOp = NV50_IR_SUBOP_LDC_IS;
1962 }
1963 } else if (i->src(0).getFile() == FILE_SHADER_OUTPUT) {
1964 assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
1965 i->op = OP_VFETCH;
1966 } else if (i->src(0).getFile() == FILE_MEMORY_GLOBAL) {
1967 Value *ind = i->getIndirect(0, 1);
1968 Value *ptr = loadResInfo64(ind, i->getSrc(0)->reg.fileIndex * 16);
1969 // XXX come up with a way not to do this for EVERY little access but
1970 // rather to batch these up somehow. Unfortunately we've lost the
1971 // information about the field width by the time we get here.
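// Bounds-check the access: predicate the memory op on
// offset + typeSize <= length, and make out-of-bounds loads return 0.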
1972 Value *offset = bld.loadImm(NULL, i->getSrc(0)->reg.data.offset + typeSizeof(i->sType));
1973 Value *length = loadResLength32(ind, i->getSrc(0)->reg.fileIndex * 16);
1974 Value *pred = new_LValue(func, FILE_PREDICATE);
1975 if (i->src(0).isIndirect(0)) {
1976 bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
1977 bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, i->getIndirect(0, 0));
1978 }
1979 i->setIndirect(0, 1, NULL);
1980 i->setIndirect(0, 0, ptr);
1981 bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
1982 i->setPredicate(CC_NOT_P, pred);
1983 if (i->defExists(0)) {
1984 bld.mkMov(i->getDef(0), bld.mkImm(0));
1985 }
1986 }
1987 break;
1988 case OP_ATOM:
1989 {
1990 const bool cctl = i->src(0).getFile() == FILE_MEMORY_GLOBAL;
1991 handleATOM(i);
1992 handleCasExch(i, cctl);
1993 }
1994 break;
1995 case OP_SULDB:
1996 case OP_SULDP:
1997 case OP_SUSTB:
1998 case OP_SUSTP:
1999 case OP_SUREDB:
2000 case OP_SUREDP:
2001 if (targ->getChipset() >= NVISA_GK104_CHIPSET)
2002 handleSurfaceOpNVE4(i->asTex());
2003 break;
2004 case OP_SUQ:
2005 handleSUQ(i);
2006 break;
2007 default:
2008 break;
2009 }
2010
2011 /* Kepler+ has a special opcode to compute a new base address to be used
2012 * for indirect loads.
2013 */
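// Roughly (illustrative syntax): "ld a[0x80+$r1]" becomes
//   "afetch $r2, a[0x80]+$r1;  ld a[0x00+$r2]".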
2014 if (targ->getChipset() >= NVISA_GK104_CHIPSET && !i->perPatch &&
2015 (i->op == OP_VFETCH || i->op == OP_EXPORT) && i->src(0).isIndirect(0)) {
2016 Instruction *afetch = bld.mkOp1(OP_AFETCH, TYPE_U32, bld.getSSA(),
2017 cloneShallow(func, i->getSrc(0)));
2018 afetch->setIndirect(0, 0, i->getIndirect(0, 0));
2019 i->src(0).get()->reg.data.offset = 0;
2020 i->setIndirect(0, 0, afetch->getDef(0));
2021 }
2022
2023 return ret;
2024 }
2025
2026 bool
2027 TargetNVC0::runLegalizePass(Program *prog, CGStage stage) const
2028 {
2029 if (stage == CG_STAGE_PRE_SSA) {
2030 NVC0LoweringPass pass(prog);
2031 return pass.run(prog, false, true);
2032 } else
2033 if (stage == CG_STAGE_POST_RA) {
2034 NVC0LegalizePostRA pass(prog);
2035 return pass.run(prog, false, true);
2036 } else
2037 if (stage == CG_STAGE_SSA) {
2038 NVC0LegalizeSSA pass;
2039 return pass.run(prog, false, true);
2040 }
2041 return false;
2042 }
2043
2044 } // namespace nv50_ir