src/gallium/drivers/nouveau/codegen/nv50_ir_lowering_nvc0.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "codegen/nv50_ir.h"
24 #include "codegen/nv50_ir_build_util.h"
25
26 #include "codegen/nv50_ir_target_nvc0.h"
27 #include "codegen/nv50_ir_lowering_nvc0.h"
28
29 #include <limits>
30
31 namespace nv50_ir {
32
33 #define QOP_ADD 0
34 #define QOP_SUBR 1
35 #define QOP_SUB 2
36 #define QOP_MOV2 3
37
38 // UL UR LL LR
39 #define QUADOP(q, r, s, t) \
40 ((QOP_##q << 6) | (QOP_##r << 4) | \
41 (QOP_##s << 2) | (QOP_##t << 0))
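// For example, QUADOP(SUBR, MOV2, SUBR, MOV2) encodes to
// (1 << 6) | (3 << 4) | (1 << 2) | (3 << 0) = 0x77, one 2-bit
// quad-op per lane in UL, UR, LL, LR order.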
42
43 void
44 NVC0LegalizeSSA::handleDIV(Instruction *i)
45 {
46 FlowInstruction *call;
47 int builtin;
48 Value *def[2];
49
50 bld.setPosition(i, false);
51 def[0] = bld.mkMovToReg(0, i->getSrc(0))->getDef(0);
52 def[1] = bld.mkMovToReg(1, i->getSrc(1))->getDef(0);
53 switch (i->dType) {
54 case TYPE_U32: builtin = NVC0_BUILTIN_DIV_U32; break;
55 case TYPE_S32: builtin = NVC0_BUILTIN_DIV_S32; break;
56 default:
57 return;
58 }
59 call = bld.mkFlow(OP_CALL, NULL, CC_ALWAYS, NULL);
60 bld.mkMov(i->getDef(0), def[(i->op == OP_DIV) ? 0 : 1]);
61 bld.mkClobber(FILE_GPR, (i->op == OP_DIV) ? 0xe : 0xd, 2);
62 bld.mkClobber(FILE_PREDICATE, (i->dType == TYPE_S32) ? 0xf : 0x3, 0);
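// The builtin returns the quotient in $r0 and the remainder in $r1; the
// clobber masks (0xe vs. 0xd) spare whichever register holds the result
// copied out above.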
63
64 call->fixed = 1;
65 call->absolute = call->builtin = 1;
66 call->target.builtin = builtin;
67 delete_Instruction(prog, i);
68 }
69
70 void
71 NVC0LegalizeSSA::handleRCPRSQ(Instruction *i)
72 {
73 assert(i->dType == TYPE_F64);
74 // There are instructions that will compute the high 32 bits of the 64-bit
75 // float. We will just stick 0 in the bottom 32 bits.
76
77 bld.setPosition(i, false);
78
79 // 1. Take the source and split it up.
80 Value *src[2], *dst[2], *def = i->getDef(0);
81 bld.mkSplit(src, 4, i->getSrc(0));
82
83 // 2. We don't care about the low 32 bits of the destination. Stick a 0 in.
84 dst[0] = bld.loadImm(NULL, 0);
85 dst[1] = bld.getSSA();
86
87 // 3. The new version of the instruction takes the high 32 bits of the
88 // source and outputs the high 32 bits of the destination.
89 i->setSrc(0, src[1]);
90 i->setDef(0, dst[1]);
91 i->setType(TYPE_F32);
92 i->subOp = NV50_IR_SUBOP_RCPRSQ_64H;
93
94 // 4. Recombine the two dst pieces back into the original destination.
95 bld.setPosition(i, true);
96 bld.mkOp2(OP_MERGE, TYPE_U64, def, dst[0], dst[1]);
97 }
98
99 void
100 NVC0LegalizeSSA::handleFTZ(Instruction *i)
101 {
102 // Only want to flush float inputs
103 assert(i->sType == TYPE_F32);
104
105 // If we're already flushing denorms (and NaN's) to zero, no need for this.
106 if (i->dnz)
107 return;
108
109 // Only certain classes of operations can flush
110 OpClass cls = prog->getTarget()->getOpClass(i->op);
111 if (cls != OPCLASS_ARITH && cls != OPCLASS_COMPARE &&
112 cls != OPCLASS_CONVERT)
113 return;
114
115 i->ftz = true;
116 }
117
118 bool
119 NVC0LegalizeSSA::visit(Function *fn)
120 {
121 bld.setProgram(fn->getProgram());
122 return true;
123 }
124
125 bool
126 NVC0LegalizeSSA::visit(BasicBlock *bb)
127 {
128 Instruction *next;
129 for (Instruction *i = bb->getEntry(); i; i = next) {
130 next = i->next;
131 if (i->sType == TYPE_F32) {
132 if (prog->getType() != Program::TYPE_COMPUTE)
133 handleFTZ(i);
134 continue;
135 }
136 switch (i->op) {
137 case OP_DIV:
138 case OP_MOD:
139 handleDIV(i);
140 break;
141 case OP_RCP:
142 case OP_RSQ:
143 if (i->dType == TYPE_F64)
144 handleRCPRSQ(i);
145 break;
146 default:
147 break;
148 }
149 }
150 return true;
151 }
152
153 NVC0LegalizePostRA::NVC0LegalizePostRA(const Program *prog)
154 : rZero(NULL),
155 carry(NULL),
156 pOne(NULL),
157 needTexBar(prog->getTarget()->getChipset() >= 0xe0)
158 {
159 }
160
161 bool
162 NVC0LegalizePostRA::insnDominatedBy(const Instruction *later,
163 const Instruction *early) const
164 {
165 if (early->bb == later->bb)
166 return early->serial < later->serial;
167 return later->bb->dominatedBy(early->bb);
168 }
169
170 void
171 NVC0LegalizePostRA::addTexUse(std::list<TexUse> &uses,
172 Instruction *usei, const Instruction *texi)
173 {
174 bool add = true;
175 for (std::list<TexUse>::iterator it = uses.begin();
176 it != uses.end();) {
177 if (insnDominatedBy(usei, it->insn)) {
178 add = false;
179 break;
180 }
181 if (insnDominatedBy(it->insn, usei))
182 it = uses.erase(it);
183 else
184 ++it;
185 }
186 if (add)
187 uses.push_back(TexUse(usei, texi));
188 }
189
190 // While it might be tempting to use an algorithm that just looks at tex
191 // uses, not all texture results are guaranteed to be used on all paths. In
192 // the case where along some control flow path a texture result is never used,
193 // we might reuse that register for something else, creating a
194 // write-after-write hazard. So we have to manually look through all
195 // instructions looking for ones that reference the registers in question.
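// A sketch of the hazard (pseudo-IR, not actual output):
//   tex $r0 ...           // result will land in $r0
//   bra $p0 SKIP
//   mov $r0 ...           // reuses $r0 while the tex is in flight: WAW
//   SKIP: ...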
196 void
197 NVC0LegalizePostRA::findFirstUses(
198 Instruction *texi, std::list<TexUse> &uses)
199 {
200 int minGPR = texi->def(0).rep()->reg.data.id;
201 int maxGPR = minGPR + texi->def(0).rep()->reg.size / 4 - 1;
202
203 unordered_set<const BasicBlock *> visited;
204 findFirstUsesBB(minGPR, maxGPR, texi->next, texi, uses, visited);
205 }
206
207 void
208 NVC0LegalizePostRA::findFirstUsesBB(
209 int minGPR, int maxGPR, Instruction *start,
210 const Instruction *texi, std::list<TexUse> &uses,
211 unordered_set<const BasicBlock *> &visited)
212 {
213 const BasicBlock *bb = start->bb;
214
215 // We don't process the whole bb the first time around. This is correct;
216 // however, we might be in a loop and hit this BB again, and need to process
217 // the full thing. So only mark a bb as visited if we processed it from the
218 // beginning.
219 if (start == bb->getEntry()) {
220 if (visited.find(bb) != visited.end())
221 return;
222 visited.insert(bb);
223 }
224
225 for (Instruction *insn = start; insn != bb->getExit(); insn = insn->next) {
226 if (insn->isNop())
227 continue;
228
229 for (int d = 0; insn->defExists(d); ++d) {
230 const Value *def = insn->def(d).rep();
231 if (insn->def(d).getFile() != FILE_GPR ||
232 def->reg.data.id + def->reg.size / 4 - 1 < minGPR ||
233 def->reg.data.id > maxGPR)
234 continue;
235 addTexUse(uses, insn, texi);
236 return;
237 }
238
239 for (int s = 0; insn->srcExists(s); ++s) {
240 const Value *src = insn->src(s).rep();
241 if (insn->src(s).getFile() != FILE_GPR ||
242 src->reg.data.id + src->reg.size / 4 - 1 < minGPR ||
243 src->reg.data.id > maxGPR)
244 continue;
245 addTexUse(uses, insn, texi);
246 return;
247 }
248 }
249
250 for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
251 findFirstUsesBB(minGPR, maxGPR, BasicBlock::get(ei.getNode())->getEntry(),
252 texi, uses, visited);
253 }
254 }
255
256 // Texture barriers:
257 // This pass is a bit long and ugly and can probably be optimized.
258 //
259 // 1. obtain a list of TEXes and their outputs' first use(s)
260 // 2. calculate the barrier level of each first use (minimal number of TEXes,
261 // over all paths, between the TEX and the use in question)
262 // 3. for each barrier, if all paths from the source TEX to that barrier
263 // contain a barrier of lesser level, it can be culled
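// Minimal example: for "tex0; tex1; use(tex0's result)" the use gets
// level 1, since one TEX (tex1) may still be outstanding when tex0's
// result is read, so a "texbar 1" in front of the use suffices.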
264 bool
265 NVC0LegalizePostRA::insertTextureBarriers(Function *fn)
266 {
267 std::list<TexUse> *uses;
268 std::vector<Instruction *> texes;
269 std::vector<int> bbFirstTex;
270 std::vector<int> bbFirstUse;
271 std::vector<int> texCounts;
272 std::vector<TexUse> useVec;
273 ArrayList insns;
274
275 fn->orderInstructions(insns);
276
277 texCounts.resize(fn->allBBlocks.getSize(), 0);
278 bbFirstTex.resize(fn->allBBlocks.getSize(), insns.getSize());
279 bbFirstUse.resize(fn->allBBlocks.getSize(), insns.getSize());
280
281 // tag BB CFG nodes by their id for later
282 for (ArrayList::Iterator i = fn->allBBlocks.iterator(); !i.end(); i.next()) {
283 BasicBlock *bb = reinterpret_cast<BasicBlock *>(i.get());
284 if (bb)
285 bb->cfg.tag = bb->getId();
286 }
287
288 // gather the first uses for each TEX
289 for (int i = 0; i < insns.getSize(); ++i) {
290 Instruction *tex = reinterpret_cast<Instruction *>(insns.get(i));
291 if (isTextureOp(tex->op)) {
292 texes.push_back(tex);
293 if (!texCounts.at(tex->bb->getId()))
294 bbFirstTex[tex->bb->getId()] = texes.size() - 1;
295 texCounts[tex->bb->getId()]++;
296 }
297 }
298 insns.clear();
299 if (texes.empty())
300 return false;
301 uses = new std::list<TexUse>[texes.size()];
302 if (!uses)
303 return false;
304 for (size_t i = 0; i < texes.size(); ++i) {
305 findFirstUses(texes[i], uses[i]);
306 }
307
308 // determine the barrier level at each use
309 for (size_t i = 0; i < texes.size(); ++i) {
310 for (std::list<TexUse>::iterator u = uses[i].begin(); u != uses[i].end();
311 ++u) {
312 BasicBlock *tb = texes[i]->bb;
313 BasicBlock *ub = u->insn->bb;
314 if (tb == ub) {
315 u->level = 0;
316 for (size_t j = i + 1; j < texes.size() &&
317 texes[j]->bb == tb && texes[j]->serial < u->insn->serial;
318 ++j)
319 u->level++;
320 } else {
321 u->level = fn->cfg.findLightestPathWeight(&tb->cfg,
322 &ub->cfg, texCounts);
323 if (u->level < 0) {
324 WARN("Failed to find path TEX -> TEXBAR\n");
325 u->level = 0;
326 continue;
327 }
328 // this counted all TEXes in the origin block, correct that
329 u->level -= i - bbFirstTex.at(tb->getId()) + 1 /* this TEX */;
330 // and did not count the TEXes in the destination block, add those
331 for (size_t j = bbFirstTex.at(ub->getId()); j < texes.size() &&
332 texes[j]->bb == ub && texes[j]->serial < u->insn->serial;
333 ++j)
334 u->level++;
335 }
336 assert(u->level >= 0);
337 useVec.push_back(*u);
338 }
339 }
340 delete[] uses;
341
342 // insert the barriers
343 for (size_t i = 0; i < useVec.size(); ++i) {
344 Instruction *prev = useVec[i].insn->prev;
345 if (useVec[i].level < 0)
346 continue;
347 if (prev && prev->op == OP_TEXBAR) {
348 if (prev->subOp > useVec[i].level)
349 prev->subOp = useVec[i].level;
350 prev->setSrc(prev->srcCount(), useVec[i].tex->getDef(0));
351 } else {
352 Instruction *bar = new_Instruction(func, OP_TEXBAR, TYPE_NONE);
353 bar->fixed = 1;
354 bar->subOp = useVec[i].level;
355 // make use explicit to ease latency calculation
356 bar->setSrc(bar->srcCount(), useVec[i].tex->getDef(0));
357 useVec[i].insn->bb->insertBefore(useVec[i].insn, bar);
358 }
359 }
360
361 if (fn->getProgram()->optLevel < 3)
362 return true;
363
364 std::vector<Limits> limitT, limitB, limitS; // entry, exit, single
365
366 limitT.resize(fn->allBBlocks.getSize(), Limits(0, 0));
367 limitB.resize(fn->allBBlocks.getSize(), Limits(0, 0));
368 limitS.resize(fn->allBBlocks.getSize());
369
370 // cull unneeded barriers (should do that earlier, but for simplicity)
371 IteratorRef bi = fn->cfg.iteratorCFG();
372 // first calculate min/max outstanding TEXes for each BB
373 for (bi->reset(); !bi->end(); bi->next()) {
374 Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
375 BasicBlock *bb = BasicBlock::get(n);
376 int min = 0;
377 int max = std::numeric_limits<int>::max();
378 for (Instruction *i = bb->getFirst(); i; i = i->next) {
379 if (isTextureOp(i->op)) {
380 min++;
381 if (max < std::numeric_limits<int>::max())
382 max++;
383 } else
384 if (i->op == OP_TEXBAR) {
385 min = MIN2(min, i->subOp);
386 max = MIN2(max, i->subOp);
387 }
388 }
389 // limits when looking at an isolated block
390 limitS[bb->getId()].min = min;
391 limitS[bb->getId()].max = max;
392 }
393 // propagate the min/max values
394 for (unsigned int l = 0; l <= fn->loopNestingBound; ++l) {
395 for (bi->reset(); !bi->end(); bi->next()) {
396 Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
397 BasicBlock *bb = BasicBlock::get(n);
398 const int bbId = bb->getId();
399 for (Graph::EdgeIterator ei = n->incident(); !ei.end(); ei.next()) {
400 BasicBlock *in = BasicBlock::get(ei.getNode());
401 const int inId = in->getId();
402 limitT[bbId].min = MAX2(limitT[bbId].min, limitB[inId].min);
403 limitT[bbId].max = MAX2(limitT[bbId].max, limitB[inId].max);
404 }
405 // I just hope this is correct ...
406 if (limitS[bbId].max == std::numeric_limits<int>::max()) {
407 // no barrier
408 limitB[bbId].min = limitT[bbId].min + limitS[bbId].min;
409 limitB[bbId].max = limitT[bbId].max + limitS[bbId].min;
410 } else {
411 // block contained a barrier
412 limitB[bbId].min = MIN2(limitS[bbId].max,
413 limitT[bbId].min + limitS[bbId].min);
414 limitB[bbId].max = MIN2(limitS[bbId].max,
415 limitT[bbId].max + limitS[bbId].min);
416 }
417 }
418 }
419 // finally delete unnecessary barriers
420 for (bi->reset(); !bi->end(); bi->next()) {
421 Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
422 BasicBlock *bb = BasicBlock::get(n);
423 Instruction *prev = NULL;
424 Instruction *next;
425 int max = limitT[bb->getId()].max;
426 for (Instruction *i = bb->getFirst(); i; i = next) {
427 next = i->next;
428 if (i->op == OP_TEXBAR) {
429 if (i->subOp >= max) {
430 delete_Instruction(prog, i);
431 i = NULL;
432 } else {
433 max = i->subOp;
434 if (prev && prev->op == OP_TEXBAR && prev->subOp >= max) {
435 delete_Instruction(prog, prev);
436 prev = NULL;
437 }
438 }
439 } else
440 if (isTextureOp(i->op)) {
441 max++;
442 }
443 if (i && !i->isNop())
444 prev = i;
445 }
446 }
447 return true;
448 }
449
450 bool
451 NVC0LegalizePostRA::visit(Function *fn)
452 {
453 if (needTexBar)
454 insertTextureBarriers(fn);
455
456 rZero = new_LValue(fn, FILE_GPR);
457 pOne = new_LValue(fn, FILE_PREDICATE);
458 carry = new_LValue(fn, FILE_FLAGS);
459
460 rZero->reg.data.id = (prog->getTarget()->getChipset() >= NVISA_GK20A_CHIPSET) ? 255 : 63;
461 carry->reg.data.id = 0;
462 pOne->reg.data.id = 7;
463
464 return true;
465 }
466
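// Replace immediate zero sources with the hardware zero register ($r63,
// or $r255 on GK20A and newer), and an immediate third source of OP_SELP
// with the always-true predicate $p7, negating it when the immediate was 0.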
467 void
468 NVC0LegalizePostRA::replaceZero(Instruction *i)
469 {
470 for (int s = 0; i->srcExists(s); ++s) {
471 if (s == 2 && i->op == OP_SUCLAMP)
472 continue;
473 ImmediateValue *imm = i->getSrc(s)->asImm();
474 if (imm) {
475 if (i->op == OP_SELP && s == 2) {
476 i->setSrc(s, pOne);
477 if (imm->reg.data.u64 == 0)
478 i->src(s).mod = i->src(s).mod ^ Modifier(NV50_IR_MOD_NOT);
479 } else if (imm->reg.data.u64 == 0) {
480 i->setSrc(s, rZero);
481 }
482 }
483 }
484 }
485
486 // replace CONT with BRA for single unconditional continue
487 bool
488 NVC0LegalizePostRA::tryReplaceContWithBra(BasicBlock *bb)
489 {
490 if (bb->cfg.incidentCount() != 2 || bb->getEntry()->op != OP_PRECONT)
491 return false;
492 Graph::EdgeIterator ei = bb->cfg.incident();
493 if (ei.getType() != Graph::Edge::BACK)
494 ei.next();
495 if (ei.getType() != Graph::Edge::BACK)
496 return false;
497 BasicBlock *contBB = BasicBlock::get(ei.getNode());
498
499 if (!contBB->getExit() || contBB->getExit()->op != OP_CONT ||
500 contBB->getExit()->getPredicate())
501 return false;
502 contBB->getExit()->op = OP_BRA;
503 bb->remove(bb->getEntry()); // delete PRECONT
504
505 ei.next();
506 assert(ei.end() || ei.getType() != Graph::Edge::BACK);
507 return true;
508 }
509
510 // replace branches to join blocks with join ops
511 void
512 NVC0LegalizePostRA::propagateJoin(BasicBlock *bb)
513 {
514 if (bb->getEntry()->op != OP_JOIN || bb->getEntry()->asFlow()->limit)
515 return;
516 for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
517 BasicBlock *in = BasicBlock::get(ei.getNode());
518 Instruction *exit = in->getExit();
519 if (!exit) {
520 in->insertTail(new FlowInstruction(func, OP_JOIN, bb));
521 // there should always be a terminator instruction
522 WARN("inserted missing terminator in BB:%i\n", in->getId());
523 } else
524 if (exit->op == OP_BRA) {
525 exit->op = OP_JOIN;
526 exit->asFlow()->limit = 1; // must-not-propagate marker
527 }
528 }
529 bb->remove(bb->getEntry());
530 }
531
532 bool
533 NVC0LegalizePostRA::visit(BasicBlock *bb)
534 {
535 Instruction *i, *next;
536
537 // remove pseudo operations and non-fixed no-ops, split 64 bit operations
538 for (i = bb->getFirst(); i; i = next) {
539 next = i->next;
540 if (i->op == OP_EMIT || i->op == OP_RESTART) {
541 if (!i->getDef(0)->refCount())
542 i->setDef(0, NULL);
543 if (i->src(0).getFile() == FILE_IMMEDIATE)
544 i->setSrc(0, rZero); // initial value must be 0
545 replaceZero(i);
546 } else
547 if (i->isNop()) {
548 bb->remove(i);
549 } else
550 if (i->op == OP_BAR && i->subOp == NV50_IR_SUBOP_BAR_SYNC &&
551 prog->getType() != Program::TYPE_COMPUTE) {
552 // It seems like barriers are never required for tessellation since
553 // the warp size is 32, and there are always at most 32 tcs threads.
554 bb->remove(i);
555 } else
556 if (i->op == OP_LOAD && i->subOp == NV50_IR_SUBOP_LDC_IS) {
557 int offset = i->src(0).get()->reg.data.offset;
558 if (abs(offset) > 0x10000)
559 i->src(0).get()->reg.fileIndex += offset >> 16;
560 i->src(0).get()->reg.data.offset = (int)(short)offset;
561 } else {
562 // TODO: Move this to before register allocation for operations that
563 // need the $c register !
564 if (typeSizeof(i->dType) == 8) {
565 Instruction *hi;
566 hi = BuildUtil::split64BitOpPostRA(func, i, rZero, carry);
567 if (hi)
568 next = hi;
569 }
570
571 if (i->op != OP_MOV && i->op != OP_PFETCH)
572 replaceZero(i);
573 }
574 }
575 if (!bb->getEntry())
576 return true;
577
578 if (!tryReplaceContWithBra(bb))
579 propagateJoin(bb);
580
581 return true;
582 }
583
584 NVC0LoweringPass::NVC0LoweringPass(Program *prog) : targ(prog->getTarget())
585 {
586 bld.setProgram(prog);
587 gMemBase = NULL;
588 }
589
590 bool
591 NVC0LoweringPass::visit(Function *fn)
592 {
593 if (prog->getType() == Program::TYPE_GEOMETRY) {
594 assert(!strncmp(fn->getName(), "MAIN", 4));
595 // TODO: when we generate actual functions pass this value along somehow
596 bld.setPosition(BasicBlock::get(fn->cfg.getRoot()), false);
597 gpEmitAddress = bld.loadImm(NULL, 0)->asLValue();
598 if (fn->cfgExit) {
599 bld.setPosition(BasicBlock::get(fn->cfgExit)->getExit(), false);
600 bld.mkMovToReg(0, gpEmitAddress);
601 }
602 }
603 return true;
604 }
605
606 bool
607 NVC0LoweringPass::visit(BasicBlock *bb)
608 {
609 return true;
610 }
611
612 inline Value *
613 NVC0LoweringPass::loadTexHandle(Value *ptr, unsigned int slot)
614 {
615 uint8_t b = prog->driver->io.auxCBSlot;
616 uint32_t off = prog->driver->io.texBindBase + slot * 4;
617 return bld.
618 mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
619 }
620
621 // move array source to first slot, convert to u16, add indirections
622 bool
623 NVC0LoweringPass::handleTEX(TexInstruction *i)
624 {
625 const int dim = i->tex.target.getDim() + i->tex.target.isCube();
626 const int arg = i->tex.target.getArgCount();
627 const int lyr = arg - (i->tex.target.isMS() ? 2 : 1);
628 const int chipset = prog->getTarget()->getChipset();
629
630 /* Only normalize in the non-explicit derivatives case. For explicit
631 * derivatives, this is handled in handleManualTXD.
632 */
633 if (i->tex.target.isCube() && i->dPdx[0].get() == NULL) {
634 Value *src[3], *val;
635 int c;
636 for (c = 0; c < 3; ++c)
637 src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), i->getSrc(c));
638 val = bld.getScratch();
639 bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
640 bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
641 bld.mkOp1(OP_RCP, TYPE_F32, val, val);
642 for (c = 0; c < 3; ++c) {
643 i->setSrc(c, bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(),
644 i->getSrc(c), val));
645 }
646 }
647
648 // Arguments to the TEX instruction are a little insane. Even though the
649 // encoding is identical between SM20 and SM30, the arguments mean
650 // different things between Fermi and Kepler+. A lot of arguments are
651 // optional based on flags passed to the instruction. This summarizes the
652 // order of things.
653 //
654 // Fermi:
655 // array/indirect
656 // coords
657 // sample
658 // lod bias
659 // depth compare
660 // offsets:
661 // - tg4: 8 bits each, either 2 (1 offset reg) or 8 (2 offset reg)
662 // - other: 4 bits each, single reg
663 //
664 // Kepler+:
665 // indirect handle
666 // array (+ offsets for txd in upper 16 bits)
667 // coords
668 // sample
669 // lod bias
670 // depth compare
671 // offsets (same as fermi, except txd which takes it with array)
672 //
673 // Maxwell (tex):
674 // array
675 // coords
676 // indirect handle
677 // sample
678 // lod bias
679 // depth compare
680 // offsets
681 //
682 // Maxwell (txd):
683 // indirect handle
684 // coords
685 // array + offsets
686 // derivatives
687
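// Rough sketch for a 2D array texture with depth compare: Fermi packs
// {array|indirect, x, y, dc}, while Kepler+ splits the handle out as
// {handle, array, x, y, dc}.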
688 if (chipset >= NVISA_GK104_CHIPSET) {
689 if (i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
690 // XXX this ignores tsc, and assumes a 1:1 mapping
691 assert(i->tex.rIndirectSrc >= 0);
692 Value *hnd = loadTexHandle(
693 bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
694 i->getIndirectR(), bld.mkImm(2)),
695 i->tex.r);
696 i->tex.r = 0xff;
697 i->tex.s = 0x1f;
698 i->setIndirectR(hnd);
699 i->setIndirectS(NULL);
700 } else if (i->tex.r == i->tex.s || i->op == OP_TXF) {
701 i->tex.r += prog->driver->io.texBindBase / 4;
702 i->tex.s = 0; // only a single cX[] value possible here
703 } else {
704 Value *hnd = bld.getScratch();
705 Value *rHnd = loadTexHandle(NULL, i->tex.r);
706 Value *sHnd = loadTexHandle(NULL, i->tex.s);
707
708 bld.mkOp3(OP_INSBF, TYPE_U32, hnd, rHnd, bld.mkImm(0x1400), sHnd);
709
710 i->tex.r = 0; // not used for indirect tex
711 i->tex.s = 0;
712 i->setIndirectR(hnd);
713 }
714 if (i->tex.target.isArray()) {
715 LValue *layer = new_LValue(func, FILE_GPR);
716 Value *src = i->getSrc(lyr);
717 const int sat = (i->op == OP_TXF) ? 1 : 0;
718 DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
719 bld.mkCvt(OP_CVT, TYPE_U16, layer, sTy, src)->saturate = sat;
720 if (i->op != OP_TXD || chipset < NVISA_GM107_CHIPSET) {
721 for (int s = dim; s >= 1; --s)
722 i->setSrc(s, i->getSrc(s - 1));
723 i->setSrc(0, layer);
724 } else {
725 i->setSrc(dim, layer);
726 }
727 }
728 // Move the indirect reference to the first place
729 if (i->tex.rIndirectSrc >= 0 && (
730 i->op == OP_TXD || chipset < NVISA_GM107_CHIPSET)) {
731 Value *hnd = i->getIndirectR();
732
733 i->setIndirectR(NULL);
734 i->moveSources(0, 1);
735 i->setSrc(0, hnd);
736 i->tex.rIndirectSrc = 0;
737 i->tex.sIndirectSrc = -1;
738 }
739 } else
740 // (nvc0) generate and move the tsc/tic/array source to the front
741 if (i->tex.target.isArray() || i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
742 LValue *src = new_LValue(func, FILE_GPR); // 0xttxsaaaa
743
744 Value *ticRel = i->getIndirectR();
745 Value *tscRel = i->getIndirectS();
746
747 if (ticRel) {
748 i->setSrc(i->tex.rIndirectSrc, NULL);
749 if (i->tex.r)
750 ticRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
751 ticRel, bld.mkImm(i->tex.r));
752 }
753 if (tscRel) {
754 i->setSrc(i->tex.sIndirectSrc, NULL);
755 if (i->tex.s)
756 tscRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
757 tscRel, bld.mkImm(i->tex.s));
758 }
759
760 Value *arrayIndex = i->tex.target.isArray() ? i->getSrc(lyr) : NULL;
761 if (arrayIndex) {
762 for (int s = dim; s >= 1; --s)
763 i->setSrc(s, i->getSrc(s - 1));
764 i->setSrc(0, arrayIndex);
765 } else {
766 i->moveSources(0, 1);
767 }
768
769 if (arrayIndex) {
770 int sat = (i->op == OP_TXF) ? 1 : 0;
771 DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
772 bld.mkCvt(OP_CVT, TYPE_U16, src, sTy, arrayIndex)->saturate = sat;
773 } else {
774 bld.loadImm(src, 0);
775 }
776
777 if (ticRel)
778 bld.mkOp3(OP_INSBF, TYPE_U32, src, ticRel, bld.mkImm(0x0917), src);
779 if (tscRel)
780 bld.mkOp3(OP_INSBF, TYPE_U32, src, tscRel, bld.mkImm(0x0710), src);
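// (the INSBF immediates presumably encode (width << 8) | offset: 9 bits
// of TIC index at bit 23 and 7 bits of TSC index at bit 16, matching the
// 0xttxsaaaa layout noted above)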
781
782 i->setSrc(0, src);
783 }
784
785 // For nvc0, the sample id has to be in the second operand, just like the
786 // offset. Right now we don't know how to pass both in, and this case can't
787 // happen with OpenGL. On nve0, the sample id is part of the texture
788 // coordinate argument.
789 assert(chipset >= NVISA_GK104_CHIPSET ||
790 !i->tex.useOffsets || !i->tex.target.isMS());
791
792 // offset is between lod and dc
793 if (i->tex.useOffsets) {
794 int n, c;
795 int s = i->srcCount(0xff, true);
796 if (i->op != OP_TXD || chipset < NVISA_GK104_CHIPSET) {
797 if (i->tex.target.isShadow())
798 s--;
799 if (i->srcExists(s)) // move potential predicate out of the way
800 i->moveSources(s, 1);
801 if (i->tex.useOffsets == 4 && i->srcExists(s + 1))
802 i->moveSources(s + 1, 1);
803 }
804 if (i->op == OP_TXG) {
805 // Either there is 1 offset, which goes into the 2 low bytes of the
806 // first source, or there are 4 offsets, which go into 2 sources (8
807 // values, 1 byte each).
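// The 0x800 | pos immediate makes INSBF insert 8 bits at bit position
// pos; e.g. the y component of offset 1 lands in bits 24..31 of offs[0].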
808 Value *offs[2] = {NULL, NULL};
809 for (n = 0; n < i->tex.useOffsets; n++) {
810 for (c = 0; c < 2; ++c) {
811 if ((n % 2) == 0 && c == 0)
812 offs[n / 2] = i->offset[n][c].get();
813 else
814 bld.mkOp3(OP_INSBF, TYPE_U32,
815 offs[n / 2],
816 i->offset[n][c].get(),
817 bld.mkImm(0x800 | ((n * 16 + c * 8) % 32)),
818 offs[n / 2]);
819 }
820 }
821 i->setSrc(s, offs[0]);
822 if (offs[1])
823 i->setSrc(s + 1, offs[1]);
824 } else {
825 unsigned imm = 0;
826 assert(i->tex.useOffsets == 1);
827 for (c = 0; c < 3; ++c) {
828 ImmediateValue val;
829 if (!i->offset[0][c].getImmediate(val))
830 assert(!"non-immediate offset passed to non-TXG");
831 imm |= (val.reg.data.u32 & 0xf) << (c * 4);
832 }
833 if (i->op == OP_TXD && chipset >= NVISA_GK104_CHIPSET) {
834 // The offset goes into the upper 16 bits of the array index. So
835 // create it if it's not already there, and INSBF it if it already
836 // is.
837 s = (i->tex.rIndirectSrc >= 0) ? 1 : 0;
838 if (chipset >= NVISA_GM107_CHIPSET)
839 s += dim;
840 if (i->tex.target.isArray()) {
841 bld.mkOp3(OP_INSBF, TYPE_U32, i->getSrc(s),
842 bld.loadImm(NULL, imm), bld.mkImm(0xc10),
843 i->getSrc(s));
844 } else {
845 i->moveSources(s, 1);
846 i->setSrc(s, bld.loadImm(NULL, imm << 16));
847 }
848 } else {
849 i->setSrc(s, bld.loadImm(NULL, imm));
850 }
851 }
852 }
853
854 if (chipset >= NVISA_GK104_CHIPSET) {
855 //
856 // If TEX requires more than 4 sources, the 2nd register tuple must be
857 // aligned to 4, even if it consists of just a single 4-byte register.
858 //
859 // XXX HACK: We insert 0 sources to avoid the 5 or 6 regs case.
860 //
861 int s = i->srcCount(0xff, true);
862 if (s > 4 && s < 7) {
863 if (i->srcExists(s)) // move potential predicate out of the way
864 i->moveSources(s, 7 - s);
865 while (s < 7)
866 i->setSrc(s++, bld.loadImm(NULL, 0));
867 }
868 }
869
870 return true;
871 }
872
873 bool
874 NVC0LoweringPass::handleManualTXD(TexInstruction *i)
875 {
876 static const uint8_t qOps[4][2] =
877 {
878 { QUADOP(MOV2, ADD, MOV2, ADD), QUADOP(MOV2, MOV2, ADD, ADD) }, // l0
879 { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(MOV2, MOV2, ADD, ADD) }, // l1
880 { QUADOP(MOV2, ADD, MOV2, ADD), QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l2
881 { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l3
882 };
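// Roughly: for source lane l, qOps[l][0] propagates dPdx across the quad
// in x and qOps[l][1] propagates dPdy in y; SUBR instead of ADD is used
// for lanes lying in the negative direction relative to lane l.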
883 Value *def[4][4];
884 Value *crd[3];
885 Instruction *tex;
886 Value *zero = bld.loadImm(bld.getSSA(), 0);
887 int l, c;
888 const int dim = i->tex.target.getDim() + i->tex.target.isCube();
889
890 // This function is invoked after handleTEX lowering, so we have to expect
891 // the arguments in the order that the hw wants them. For Fermi, array and
892 // indirect are both in the leading arg, while for Kepler, array and
893 // indirect are separate (and both precede the coordinates). Maxwell is
894 // handled in a separate function.
895 unsigned array;
896 if (targ->getChipset() < NVISA_GK104_CHIPSET)
897 array = i->tex.target.isArray() || i->tex.rIndirectSrc >= 0;
898 else
899 array = i->tex.target.isArray() + (i->tex.rIndirectSrc >= 0);
900
901 i->op = OP_TEX; // no need to clone dPdx/dPdy later
902
903 for (c = 0; c < dim; ++c)
904 crd[c] = bld.getScratch();
905
906 bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
907 for (l = 0; l < 4; ++l) {
908 Value *src[3], *val;
909 // mov coordinates from lane l to all lanes
910 for (c = 0; c < dim; ++c)
911 bld.mkQuadop(0x00, crd[c], l, i->getSrc(c + array), zero);
912 // add dPdx from lane l to lanes dx
913 for (c = 0; c < dim; ++c)
914 bld.mkQuadop(qOps[l][0], crd[c], l, i->dPdx[c].get(), crd[c]);
915 // add dPdy from lane l to lanes dy
916 for (c = 0; c < dim; ++c)
917 bld.mkQuadop(qOps[l][1], crd[c], l, i->dPdy[c].get(), crd[c]);
918 // normalize cube coordinates
919 if (i->tex.target.isCube()) {
920 for (c = 0; c < 3; ++c)
921 src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), crd[c]);
922 val = bld.getScratch();
923 bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
924 bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
925 bld.mkOp1(OP_RCP, TYPE_F32, val, val);
926 for (c = 0; c < 3; ++c)
927 src[c] = bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(), crd[c], val);
928 } else {
929 for (c = 0; c < dim; ++c)
930 src[c] = crd[c];
931 }
932 // texture
933 bld.insert(tex = cloneForward(func, i));
934 for (c = 0; c < dim; ++c)
935 tex->setSrc(c + array, src[c]);
936 // save results
937 for (c = 0; i->defExists(c); ++c) {
938 Instruction *mov;
939 def[c][l] = bld.getSSA();
940 mov = bld.mkMov(def[c][l], tex->getDef(c));
941 mov->fixed = 1;
942 mov->lanes = 1 << l;
943 }
944 }
945 bld.mkOp(OP_QUADPOP, TYPE_NONE, NULL);
946
947 for (c = 0; i->defExists(c); ++c) {
948 Instruction *u = bld.mkOp(OP_UNION, TYPE_U32, i->getDef(c));
949 for (l = 0; l < 4; ++l)
950 u->setSrc(l, def[c][l]);
951 }
952
953 i->bb->remove(i);
954 return true;
955 }
956
957 bool
958 NVC0LoweringPass::handleTXD(TexInstruction *txd)
959 {
960 int dim = txd->tex.target.getDim() + txd->tex.target.isCube();
961 unsigned arg = txd->tex.target.getArgCount();
962 unsigned expected_args = arg;
963 const int chipset = prog->getTarget()->getChipset();
964
965 if (chipset >= NVISA_GK104_CHIPSET) {
966 if (!txd->tex.target.isArray() && txd->tex.useOffsets)
967 expected_args++;
968 if (txd->tex.rIndirectSrc >= 0 || txd->tex.sIndirectSrc >= 0)
969 expected_args++;
970 } else {
971 if (txd->tex.useOffsets)
972 expected_args++;
973 if (!txd->tex.target.isArray() && (
974 txd->tex.rIndirectSrc >= 0 || txd->tex.sIndirectSrc >= 0))
975 expected_args++;
976 }
977
978 if (expected_args > 4 ||
979 dim > 2 ||
980 txd->tex.target.isShadow())
981 txd->op = OP_TEX;
982
983 handleTEX(txd);
984 while (txd->srcExists(arg))
985 ++arg;
986
987 txd->tex.derivAll = true;
988 if (txd->op == OP_TEX)
989 return handleManualTXD(txd);
990
991 assert(arg == expected_args);
992 for (int c = 0; c < dim; ++c) {
993 txd->setSrc(arg + c * 2 + 0, txd->dPdx[c]);
994 txd->setSrc(arg + c * 2 + 1, txd->dPdy[c]);
995 txd->dPdx[c].set(NULL);
996 txd->dPdy[c].set(NULL);
997 }
998
999 // In this case we have fewer than 4 "real" arguments, which means that
1000 // handleTEX didn't apply any padding. However we have to make sure that
1001 // the second "group" of arguments still gets padded up to 4.
1002 if (chipset >= NVISA_GK104_CHIPSET) {
1003 int s = arg + 2 * dim;
1004 if (s >= 4 && s < 7) {
1005 if (txd->srcExists(s)) // move potential predicate out of the way
1006 txd->moveSources(s, 7 - s);
1007 while (s < 7)
1008 txd->setSrc(s++, bld.loadImm(NULL, 0));
1009 }
1010 }
1011
1012 return true;
1013 }
1014
1015 bool
1016 NVC0LoweringPass::handleTXQ(TexInstruction *txq)
1017 {
1018 const int chipset = prog->getTarget()->getChipset();
1019 if (chipset >= NVISA_GK104_CHIPSET && txq->tex.rIndirectSrc < 0)
1020 txq->tex.r += prog->driver->io.texBindBase / 4;
1021
1022 if (txq->tex.rIndirectSrc < 0)
1023 return true;
1024
1025 Value *ticRel = txq->getIndirectR();
1026
1027 txq->setIndirectS(NULL);
1028 txq->tex.sIndirectSrc = -1;
1029
1030 assert(ticRel);
1031
1032 if (chipset < NVISA_GK104_CHIPSET) {
1033 LValue *src = new_LValue(func, FILE_GPR); // 0xttxsaaaa
1034
1035 txq->setSrc(txq->tex.rIndirectSrc, NULL);
1036 if (txq->tex.r)
1037 ticRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
1038 ticRel, bld.mkImm(txq->tex.r));
1039
1040 bld.mkOp2(OP_SHL, TYPE_U32, src, ticRel, bld.mkImm(0x17));
1041
1042 txq->moveSources(0, 1);
1043 txq->setSrc(0, src);
1044 } else {
1045 Value *hnd = loadTexHandle(
1046 bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
1047 txq->getIndirectR(), bld.mkImm(2)),
1048 txq->tex.r);
1049 txq->tex.r = 0xff;
1050 txq->tex.s = 0x1f;
1051
1052 txq->setIndirectR(NULL);
1053 txq->moveSources(0, 1);
1054 txq->setSrc(0, hnd);
1055 txq->tex.rIndirectSrc = 0;
1056 }
1057
1058 return true;
1059 }
1060
1061 bool
1062 NVC0LoweringPass::handleTXLQ(TexInstruction *i)
1063 {
1064 /* The outputs are inverted compared to what the TGSI instruction
1065 * expects. Take that into account in the mask.
1066 */
1067 assert((i->tex.mask & ~3) == 0);
1068 if (i->tex.mask == 1)
1069 i->tex.mask = 2;
1070 else if (i->tex.mask == 2)
1071 i->tex.mask = 1;
1072 handleTEX(i);
1073 bld.setPosition(i, true);
1074
1075 /* The returned values are not quite what we want:
1076 * (a) convert from s16/u16 to f32
1077 * (b) multiply by 1/256
1078 */
1079 for (int def = 0; def < 2; ++def) {
1080 if (!i->defExists(def))
1081 continue;
1082 enum DataType type = TYPE_S16;
1083 if (i->tex.mask == 2 || def > 0)
1084 type = TYPE_U16;
1085 bld.mkCvt(OP_CVT, TYPE_F32, i->getDef(def), type, i->getDef(def));
1086 bld.mkOp2(OP_MUL, TYPE_F32, i->getDef(def),
1087 i->getDef(def), bld.loadImm(NULL, 1.0f / 256));
1088 }
1089 if (i->tex.mask == 3) {
1090 LValue *t = new_LValue(func, FILE_GPR);
1091 bld.mkMov(t, i->getDef(0));
1092 bld.mkMov(i->getDef(0), i->getDef(1));
1093 bld.mkMov(i->getDef(1), t);
1094 }
1095 return true;
1096 }
1097
1098 bool
1099 NVC0LoweringPass::handleBUFQ(Instruction *bufq)
1100 {
1101 bufq->op = OP_MOV;
1102 bufq->setSrc(0, loadBufLength32(bufq->getIndirect(0, 1),
1103 bufq->getSrc(0)->reg.fileIndex * 16));
1104 bufq->setIndirect(0, 0, NULL);
1105 bufq->setIndirect(0, 1, NULL);
1106 return true;
1107 }
1108
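// Lower a shared-memory atomic into a load-locked / store-unlocked retry
// loop. Sketch of the CFG built below:
//   tryLockBB:      ld lock -> (old value, $p); bra setAndUnlockBB if $p
//   setAndUnlockBB: compute the new value; st unlock -> $p
//   failLockBB:     bra tryLockBB unless the unlock store succeeded
//   joinBB:         join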
1109 void
1110 NVC0LoweringPass::handleSharedATOMNVE4(Instruction *atom)
1111 {
1112 assert(atom->src(0).getFile() == FILE_MEMORY_SHARED);
1113
1114 BasicBlock *currBB = atom->bb;
1115 BasicBlock *tryLockBB = atom->bb->splitBefore(atom, false);
1116 BasicBlock *joinBB = atom->bb->splitAfter(atom);
1117 BasicBlock *setAndUnlockBB = new BasicBlock(func);
1118 BasicBlock *failLockBB = new BasicBlock(func);
1119
1120 bld.setPosition(currBB, true);
1121 assert(!currBB->joinAt);
1122 currBB->joinAt = bld.mkFlow(OP_JOINAT, joinBB, CC_ALWAYS, NULL);
1123
1124 CmpInstruction *pred =
1125 bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
1126 TYPE_U32, bld.mkImm(0), bld.mkImm(1));
1127
1128 bld.mkFlow(OP_BRA, tryLockBB, CC_ALWAYS, NULL);
1129 currBB->cfg.attach(&tryLockBB->cfg, Graph::Edge::TREE);
1130
1131 bld.setPosition(tryLockBB, true);
1132
1133 Instruction *ld =
1134 bld.mkLoad(TYPE_U32, atom->getDef(0), atom->getSrc(0)->asSym(),
1135 atom->getIndirect(0, 0));
1136 ld->setDef(1, bld.getSSA(1, FILE_PREDICATE));
1137 ld->subOp = NV50_IR_SUBOP_LOAD_LOCKED;
1138
1139 bld.mkFlow(OP_BRA, setAndUnlockBB, CC_P, ld->getDef(1));
1140 bld.mkFlow(OP_BRA, failLockBB, CC_ALWAYS, NULL);
1141 tryLockBB->cfg.attach(&failLockBB->cfg, Graph::Edge::CROSS);
1142 tryLockBB->cfg.attach(&setAndUnlockBB->cfg, Graph::Edge::TREE);
1143
1144 tryLockBB->cfg.detach(&joinBB->cfg);
1145 bld.remove(atom);
1146
1147 bld.setPosition(setAndUnlockBB, true);
1148 Value *stVal;
1149 if (atom->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
1150 // Read the old value, and write the new one.
1151 stVal = atom->getSrc(1);
1152 } else if (atom->subOp == NV50_IR_SUBOP_ATOM_CAS) {
1153 CmpInstruction *set =
1154 bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(),
1155 TYPE_U32, ld->getDef(0), atom->getSrc(1));
1156
1157 bld.mkCmp(OP_SLCT, CC_NE, TYPE_U32, (stVal = bld.getSSA()),
1158 TYPE_U32, atom->getSrc(2), ld->getDef(0), set->getDef(0));
1159 } else {
1160 operation op;
1161
1162 switch (atom->subOp) {
1163 case NV50_IR_SUBOP_ATOM_ADD:
1164 op = OP_ADD;
1165 break;
1166 case NV50_IR_SUBOP_ATOM_AND:
1167 op = OP_AND;
1168 break;
1169 case NV50_IR_SUBOP_ATOM_OR:
1170 op = OP_OR;
1171 break;
1172 case NV50_IR_SUBOP_ATOM_XOR:
1173 op = OP_XOR;
1174 break;
1175 case NV50_IR_SUBOP_ATOM_MIN:
1176 op = OP_MIN;
1177 break;
1178 case NV50_IR_SUBOP_ATOM_MAX:
1179 op = OP_MAX;
1180 break;
1181 default:
1182 assert(0);
1183 return;
1184 }
1185
1186 stVal = bld.mkOp2v(op, atom->dType, bld.getSSA(), ld->getDef(0),
1187 atom->getSrc(1));
1188 }
1189
1190 Instruction *st =
1191 bld.mkStore(OP_STORE, TYPE_U32, atom->getSrc(0)->asSym(),
1192 atom->getIndirect(0, 0), stVal);
1193 st->setDef(0, pred->getDef(0));
1194 st->subOp = NV50_IR_SUBOP_STORE_UNLOCKED;
1195
1196 bld.mkFlow(OP_BRA, failLockBB, CC_ALWAYS, NULL);
1197 setAndUnlockBB->cfg.attach(&failLockBB->cfg, Graph::Edge::TREE);
1198
1199 // Loop back until the store has been performed.
1200 bld.setPosition(failLockBB, true);
1201 bld.mkFlow(OP_BRA, tryLockBB, CC_NOT_P, pred->getDef(0));
1202 bld.mkFlow(OP_BRA, joinBB, CC_ALWAYS, NULL);
1203 failLockBB->cfg.attach(&tryLockBB->cfg, Graph::Edge::BACK);
1204 failLockBB->cfg.attach(&joinBB->cfg, Graph::Edge::TREE);
1205
1206 bld.setPosition(joinBB, false);
1207 bld.mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
1208 }
1209
1210 void
1211 NVC0LoweringPass::handleSharedATOM(Instruction *atom)
1212 {
1213 assert(atom->src(0).getFile() == FILE_MEMORY_SHARED);
1214
1215 BasicBlock *currBB = atom->bb;
1216 BasicBlock *tryLockAndSetBB = atom->bb->splitBefore(atom, false);
1217 BasicBlock *joinBB = atom->bb->splitAfter(atom);
1218
1219 bld.setPosition(currBB, true);
1220 assert(!currBB->joinAt);
1221 currBB->joinAt = bld.mkFlow(OP_JOINAT, joinBB, CC_ALWAYS, NULL);
1222
1223 bld.mkFlow(OP_BRA, tryLockAndSetBB, CC_ALWAYS, NULL);
1224 currBB->cfg.attach(&tryLockAndSetBB->cfg, Graph::Edge::TREE);
1225
1226 bld.setPosition(tryLockAndSetBB, true);
1227
1228 Instruction *ld =
1229 bld.mkLoad(TYPE_U32, atom->getDef(0), atom->getSrc(0)->asSym(),
1230 atom->getIndirect(0, 0));
1231 ld->setDef(1, bld.getSSA(1, FILE_PREDICATE));
1232 ld->subOp = NV50_IR_SUBOP_LOAD_LOCKED;
1233
1234 Value *stVal;
1235 if (atom->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
1236 // Read the old value, and write the new one.
1237 stVal = atom->getSrc(1);
1238 } else if (atom->subOp == NV50_IR_SUBOP_ATOM_CAS) {
1239 CmpInstruction *set =
1240 bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
1241 TYPE_U32, ld->getDef(0), atom->getSrc(1));
1242 set->setPredicate(CC_P, ld->getDef(1));
1243
1244 Instruction *selp =
1245 bld.mkOp3(OP_SELP, TYPE_U32, bld.getSSA(), ld->getDef(0),
1246 atom->getSrc(2), set->getDef(0));
1247 selp->src(2).mod = Modifier(NV50_IR_MOD_NOT);
1248 selp->setPredicate(CC_P, ld->getDef(1));
1249
1250 stVal = selp->getDef(0);
1251 } else {
1252 operation op;
1253
1254 switch (atom->subOp) {
1255 case NV50_IR_SUBOP_ATOM_ADD:
1256 op = OP_ADD;
1257 break;
1258 case NV50_IR_SUBOP_ATOM_AND:
1259 op = OP_AND;
1260 break;
1261 case NV50_IR_SUBOP_ATOM_OR:
1262 op = OP_OR;
1263 break;
1264 case NV50_IR_SUBOP_ATOM_XOR:
1265 op = OP_XOR;
1266 break;
1267 case NV50_IR_SUBOP_ATOM_MIN:
1268 op = OP_MIN;
1269 break;
1270 case NV50_IR_SUBOP_ATOM_MAX:
1271 op = OP_MAX;
1272 break;
1273 default:
1274 assert(0);
1275 return;
1276 }
1277
1278 Instruction *i =
1279 bld.mkOp2(op, atom->dType, bld.getSSA(), ld->getDef(0),
1280 atom->getSrc(1));
1281 i->setPredicate(CC_P, ld->getDef(1));
1282
1283 stVal = i->getDef(0);
1284 }
1285
1286 Instruction *st =
1287 bld.mkStore(OP_STORE, TYPE_U32, atom->getSrc(0)->asSym(),
1288 atom->getIndirect(0, 0), stVal);
1289 st->setPredicate(CC_P, ld->getDef(1));
1290 st->subOp = NV50_IR_SUBOP_STORE_UNLOCKED;
1291
1292 // Loop until the lock is acquired.
1293 bld.mkFlow(OP_BRA, tryLockAndSetBB, CC_NOT_P, ld->getDef(1));
1294 tryLockAndSetBB->cfg.attach(&tryLockAndSetBB->cfg, Graph::Edge::BACK);
1295 tryLockAndSetBB->cfg.attach(&joinBB->cfg, Graph::Edge::CROSS);
1296 bld.mkFlow(OP_BRA, joinBB, CC_ALWAYS, NULL);
1297
1298 bld.remove(atom);
1299
1300 bld.setPosition(joinBB, false);
1301 bld.mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
1302 }
1303
1304 bool
1305 NVC0LoweringPass::handleATOM(Instruction *atom)
1306 {
1307 SVSemantic sv;
1308 Value *ptr = atom->getIndirect(0, 0), *ind = atom->getIndirect(0, 1), *base;
1309
1310 switch (atom->src(0).getFile()) {
1311 case FILE_MEMORY_LOCAL:
1312 sv = SV_LBASE;
1313 break;
1314 case FILE_MEMORY_SHARED:
1315 // For Fermi/Kepler, we have to use ld lock/st unlock to perform atomic
1316 // operations on shared memory. For Maxwell, ATOMS is enough.
1317 if (targ->getChipset() < NVISA_GK104_CHIPSET)
1318 handleSharedATOM(atom);
1319 else if (targ->getChipset() < NVISA_GM107_CHIPSET)
1320 handleSharedATOMNVE4(atom);
1321 return true;
1322 default:
1323 assert(atom->src(0).getFile() == FILE_MEMORY_BUFFER);
1324 base = loadBufInfo64(ind, atom->getSrc(0)->reg.fileIndex * 16);
1325 assert(base->reg.size == 8);
1326 if (ptr)
1327 base = bld.mkOp2v(OP_ADD, TYPE_U64, base, base, ptr);
1328 assert(base->reg.size == 8);
1329 atom->setIndirect(0, 0, base);
1330 atom->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
1331
1332 // Harden against out-of-bounds accesses
1333 Value *offset = bld.loadImm(NULL, atom->getSrc(0)->reg.data.offset + typeSizeof(atom->sType));
1334 Value *length = loadBufLength32(ind, atom->getSrc(0)->reg.fileIndex * 16);
1335 Value *pred = new_LValue(func, FILE_PREDICATE);
1336 if (ptr)
1337 bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, ptr);
1338 bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
1339 atom->setPredicate(CC_NOT_P, pred);
1340 if (atom->defExists(0)) {
1341 Value *zero, *dst = atom->getDef(0);
1342 atom->setDef(0, bld.getSSA());
1343
1344 bld.setPosition(atom, true);
1345 bld.mkMov((zero = bld.getSSA()), bld.mkImm(0))
1346 ->setPredicate(CC_P, pred);
1347 bld.mkOp2(OP_UNION, TYPE_U32, dst, atom->getDef(0), zero);
1348 }
1349
1350 return true;
1351 }
1352 base =
1353 bld.mkOp1v(OP_RDSV, TYPE_U32, bld.getScratch(), bld.mkSysVal(sv, 0));
1354
1355 atom->setSrc(0, cloneShallow(func, atom->getSrc(0)));
1356 atom->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
1357 if (ptr)
1358 base = bld.mkOp2v(OP_ADD, TYPE_U32, base, base, ptr);
1359 atom->setIndirect(0, 1, NULL);
1360 atom->setIndirect(0, 0, base);
1361
1362 return true;
1363 }
1364
1365 bool
1366 NVC0LoweringPass::handleCasExch(Instruction *cas, bool needCctl)
1367 {
1368 if (targ->getChipset() < NVISA_GM107_CHIPSET) {
1369 if (cas->src(0).getFile() == FILE_MEMORY_SHARED) {
1370 // ATOM_CAS and ATOM_EXCH are handled in handleSharedATOM().
1371 return false;
1372 }
1373 }
1374
1375 if (cas->subOp != NV50_IR_SUBOP_ATOM_CAS &&
1376 cas->subOp != NV50_IR_SUBOP_ATOM_EXCH)
1377 return false;
1378 bld.setPosition(cas, true);
1379
1380 if (needCctl) {
1381 Instruction *cctl = bld.mkOp1(OP_CCTL, TYPE_NONE, NULL, cas->getSrc(0));
1382 cctl->setIndirect(0, 0, cas->getIndirect(0, 0));
1383 cctl->fixed = 1;
1384 cctl->subOp = NV50_IR_SUBOP_CCTL_IV;
1385 if (cas->isPredicated())
1386 cctl->setPredicate(cas->cc, cas->getPredicate());
1387 }
1388
1389 if (cas->subOp == NV50_IR_SUBOP_ATOM_CAS) {
1390 // CAS is crazy. Its 2nd source is a double reg, and the 3rd source
1391 // should be set to the high part of the double reg or bad things will
1392 // happen elsewhere in the universe.
1393 // Also, it sometimes returns the new value instead of the old one
1394 // under mysterious circumstances.
1395 Value *dreg = bld.getSSA(8);
1396 bld.setPosition(cas, false);
1397 bld.mkOp2(OP_MERGE, TYPE_U64, dreg, cas->getSrc(1), cas->getSrc(2));
1398 cas->setSrc(1, dreg);
1399 cas->setSrc(2, dreg);
1400 }
1401
1402 return true;
1403 }
1404
1405 inline Value *
1406 NVC0LoweringPass::loadResInfo32(Value *ptr, uint32_t off, uint16_t base)
1407 {
1408 uint8_t b = prog->driver->io.auxCBSlot;
1409 off += base;
1410
1411 return bld.
1412 mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
1413 }
1414
1415 inline Value *
1416 NVC0LoweringPass::loadResInfo64(Value *ptr, uint32_t off, uint16_t base)
1417 {
1418 uint8_t b = prog->driver->io.auxCBSlot;
1419 off += base;
1420
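// each info record occupies 16 bytes, hence the scaling by 16 below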
1421 if (ptr)
1422 ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getScratch(), ptr, bld.mkImm(4));
1423
1424 return bld.
1425 mkLoadv(TYPE_U64, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U64, off), ptr);
1426 }
1427
1428 inline Value *
1429 NVC0LoweringPass::loadResLength32(Value *ptr, uint32_t off, uint16_t base)
1430 {
1431 uint8_t b = prog->driver->io.auxCBSlot;
1432 off += base;
1433
1434 if (ptr)
1435 ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getScratch(), ptr, bld.mkImm(4));
1436
1437 return bld.
1438 mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U64, off + 8), ptr);
1439 }
1440
1441 inline Value *
1442 NVC0LoweringPass::loadSuInfo32(Value *ptr, uint32_t off)
1443 {
1444 return loadResInfo32(ptr, off, prog->driver->io.suInfoBase);
1445 }
1446
1447 inline Value *
1448 NVC0LoweringPass::loadSuInfo64(Value *ptr, uint32_t off)
1449 {
1450 return loadResInfo64(ptr, off, prog->driver->io.suInfoBase);
1451 }
1452
1453 inline Value *
1454 NVC0LoweringPass::loadSuLength32(Value *ptr, uint32_t off)
1455 {
1456 return loadResLength32(ptr, off, prog->driver->io.suInfoBase);
1457 }
1458
1459 inline Value *
1460 NVC0LoweringPass::loadBufInfo32(Value *ptr, uint32_t off)
1461 {
1462 return loadResInfo32(ptr, off, prog->driver->io.bufInfoBase);
1463 }
1464
1465 inline Value *
1466 NVC0LoweringPass::loadBufInfo64(Value *ptr, uint32_t off)
1467 {
1468 return loadResInfo64(ptr, off, prog->driver->io.bufInfoBase);
1469 }
1470
1471 inline Value *
1472 NVC0LoweringPass::loadBufLength32(Value *ptr, uint32_t off)
1473 {
1474 return loadResLength32(ptr, off, prog->driver->io.bufInfoBase);
1475 }
1476
1477 inline Value *
1478 NVC0LoweringPass::loadUboInfo32(Value *ptr, uint32_t off)
1479 {
1480 return loadResInfo32(ptr, off, prog->driver->io.uboInfoBase);
1481 }
1482
1483 inline Value *
1484 NVC0LoweringPass::loadUboInfo64(Value *ptr, uint32_t off)
1485 {
1486 return loadResInfo64(ptr, off, prog->driver->io.uboInfoBase);
1487 }
1488
1489 inline Value *
1490 NVC0LoweringPass::loadUboLength32(Value *ptr, uint32_t off)
1491 {
1492 return loadResLength32(ptr, off, prog->driver->io.uboInfoBase);
1493 }
1494
1495 inline Value *
1496 NVC0LoweringPass::loadMsInfo32(Value *ptr, uint32_t off)
1497 {
1498 uint8_t b = prog->driver->io.msInfoCBSlot;
1499 off += prog->driver->io.msInfoBase;
1500 return bld.
1501 mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
1502 }
1503
1504 /* On nvc0, surface info is obtained via the surface binding points passed
1505 * to the SULD/SUST instructions.
1506 * On nve4, surface info is stored in c[] and is used by various special
1507 * instructions, e.g. for clamping coordinates or generating an address.
1508 * They couldn't just have added an equivalent to TIC now, could they?
1509 */
1510 #define NVE4_SU_INFO_ADDR 0x00
1511 #define NVE4_SU_INFO_FMT 0x04
1512 #define NVE4_SU_INFO_DIM_X 0x08
1513 #define NVE4_SU_INFO_PITCH 0x0c
1514 #define NVE4_SU_INFO_DIM_Y 0x10
1515 #define NVE4_SU_INFO_ARRAY 0x14
1516 #define NVE4_SU_INFO_DIM_Z 0x18
1517 #define NVE4_SU_INFO_UNK1C 0x1c
1518 #define NVE4_SU_INFO_WIDTH 0x20
1519 #define NVE4_SU_INFO_HEIGHT 0x24
1520 #define NVE4_SU_INFO_DEPTH 0x28
1521 #define NVE4_SU_INFO_TARGET 0x2c
1522 #define NVE4_SU_INFO_BSIZE 0x30
1523 #define NVE4_SU_INFO_RAW_X 0x34
1524 #define NVE4_SU_INFO_MS_X 0x38
1525 #define NVE4_SU_INFO_MS_Y 0x3c
1526
1527 #define NVE4_SU_INFO__STRIDE 0x40
1528
1529 #define NVE4_SU_INFO_DIM(i) (0x08 + (i) * 8)
1530 #define NVE4_SU_INFO_SIZE(i) (0x20 + (i) * 4)
1531 #define NVE4_SU_INFO_MS(i) (0x38 + (i) * 4)
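// Consistency example: NVE4_SU_INFO_DIM(1) == NVE4_SU_INFO_DIM_Y (0x10)
// and NVE4_SU_INFO_MS(1) == NVE4_SU_INFO_MS_Y (0x3c); each surface slot
// occupies NVE4_SU_INFO__STRIDE (0x40) bytes of c[] space.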
1532
1533 static inline uint16_t getSuClampSubOp(const TexInstruction *su, int c)
1534 {
1535 switch (su->tex.target.getEnum()) {
1536 case TEX_TARGET_BUFFER: return NV50_IR_SUBOP_SUCLAMP_PL(0, 1);
1537 case TEX_TARGET_RECT: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
1538 case TEX_TARGET_1D: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
1539 case TEX_TARGET_1D_ARRAY: return (c == 1) ?
1540 NV50_IR_SUBOP_SUCLAMP_PL(0, 2) :
1541 NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
1542 case TEX_TARGET_2D: return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
1543 case TEX_TARGET_2D_MS: return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
1544 case TEX_TARGET_2D_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
1545 case TEX_TARGET_2D_MS_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
1546 case TEX_TARGET_3D: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
1547 case TEX_TARGET_CUBE: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
1548 case TEX_TARGET_CUBE_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
1549 default:
1550 assert(0);
1551 return 0;
1552 }
1553 }
1554
1555 bool
1556 NVC0LoweringPass::handleSUQ(TexInstruction *suq)
1557 {
1558 int dim = suq->tex.target.getDim();
1559 int arg = dim + (suq->tex.target.isArray() || suq->tex.target.isCube());
1560 uint8_t s = prog->driver->io.auxCBSlot;
1561 Value *ind = suq->getIndirectR();
1562 uint32_t base;
1563 int c;
1564
1565 base = prog->driver->io.suInfoBase + suq->tex.r * NVE4_SU_INFO__STRIDE;
1566
1567 if (ind)
1568 ind = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getScratch(),
1569 ind, bld.mkImm(6));
1570
1571 for (c = 0; c < arg; ++c) {
1572 if (suq->defExists(c)) {
1573 int offset;
1574
1575 if (c == 1 && suq->tex.target == TEX_TARGET_1D_ARRAY) {
1576 offset = base + NVE4_SU_INFO_SIZE(2);
1577 } else {
1578 offset = base + NVE4_SU_INFO_SIZE(c);
1579 }
1580 bld.mkLoad(TYPE_U32, suq->getDef(c),
1581 bld.mkSymbol(FILE_MEMORY_CONST, s, TYPE_U32, offset), ind);
1582 }
1583 }
1584
1585 if (suq->tex.target.isCube()) {
1586 if (suq->defExists(2)) {
1587 bld.mkOp2(OP_DIV, TYPE_U32, suq->getDef(2), suq->getDef(2),
1588 bld.loadImm(NULL, 6));
1589 }
1590 }
1591
1592 if (suq->defExists(3)) {
1593 // .w contains the number of samples for multi-sampled images but we
1594 // don't support them for now.
1595 bld.mkMov(suq->getDef(3), bld.loadImm(NULL, 1));
1596 }
1597
1598 bld.remove(suq);
1599 return true;
1600 }
1601
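// Rewrites an MS texel fetch into a plain 2D fetch, roughly:
//   x' = (x << ms_x) + dx(s), y' = (y << ms_y) + dy(s)
// where (dx, dy) is sample s's position from the MS info table.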
1602 void
1603 NVC0LoweringPass::adjustCoordinatesMS(TexInstruction *tex)
1604 {
1605 const uint16_t base = tex->tex.r * NVE4_SU_INFO__STRIDE;
1606 const int arg = tex->tex.target.getArgCount();
1607
1608 if (tex->tex.target == TEX_TARGET_2D_MS)
1609 tex->tex.target = TEX_TARGET_2D;
1610 else
1611 if (tex->tex.target == TEX_TARGET_2D_MS_ARRAY)
1612 tex->tex.target = TEX_TARGET_2D_ARRAY;
1613 else
1614 return;
1615
1616 Value *x = tex->getSrc(0);
1617 Value *y = tex->getSrc(1);
1618 Value *s = tex->getSrc(arg - 1);
1619
1620 Value *tx = bld.getSSA(), *ty = bld.getSSA(), *ts = bld.getSSA();
1621 Value *ind = NULL;
1622
1623 if (tex->tex.rIndirectSrc >= 0) {
1624 assert(tex->tex.r == 0);
1625 // FIXME: out of bounds
1626 ind = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
1627 tex->getIndirectR(), bld.mkImm(6));
1628 }
1629
1630 Value *ms_x = loadSuInfo32(ind, base + NVE4_SU_INFO_MS(0));
1631 Value *ms_y = loadSuInfo32(ind, base + NVE4_SU_INFO_MS(1));
1632
1633 bld.mkOp2(OP_SHL, TYPE_U32, tx, x, ms_x);
1634 bld.mkOp2(OP_SHL, TYPE_U32, ty, y, ms_y);
1635
1636 s = bld.mkOp2v(OP_AND, TYPE_U32, ts, s, bld.loadImm(NULL, 0x7));
1637 s = bld.mkOp2v(OP_SHL, TYPE_U32, ts, ts, bld.mkImm(3));
1638
1639 Value *dx = loadMsInfo32(ts, 0x0);
1640 Value *dy = loadMsInfo32(ts, 0x4);
1641
1642 bld.mkOp2(OP_ADD, TYPE_U32, tx, tx, dx);
1643 bld.mkOp2(OP_ADD, TYPE_U32, ty, ty, dy);
1644
1645 tex->setSrc(0, tx);
1646 tex->setSrc(1, ty);
1647 tex->moveSources(arg, -1);
1648 }
1649
1650 // Sets 64-bit "generic address", predicate and format sources for SULD/SUST.
1651 // They're computed from the coordinates using the surface info in c[] space.
1652 void
1653 NVC0LoweringPass::processSurfaceCoordsNVE4(TexInstruction *su)
1654 {
1655 Instruction *insn;
1656 const bool atom = su->op == OP_SUREDB || su->op == OP_SUREDP;
1657 const bool raw =
1658 su->op == OP_SULDB || su->op == OP_SUSTB || su->op == OP_SUREDB;
1659 const int idx = su->tex.r;
1660 const int dim = su->tex.target.getDim();
1661 const int arg = dim + (su->tex.target.isArray() || su->tex.target.isCube());
1662 const uint16_t base = idx * NVE4_SU_INFO__STRIDE;
1663 int c;
1664 Value *zero = bld.mkImm(0);
1665 Value *p1 = NULL;
1666 Value *v;
1667 Value *src[3];
1668 Value *bf, *eau, *off;
1669 Value *addr, *pred;
1670 Value *ind = NULL;
1671
1672 off = bld.getScratch(4);
1673 bf = bld.getScratch(4);
1674 addr = bld.getSSA(8);
1675 pred = bld.getScratch(1, FILE_PREDICATE);
1676
1677 bld.setPosition(su, false);
1678
1679 adjustCoordinatesMS(su);
1680
1681 if (su->tex.rIndirectSrc >= 0) {
1682 ind = su->getIndirectR();
1683 if (su->tex.r > 0) {
1684 ind = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getSSA(), ind,
1685 bld.loadImm(NULL, su->tex.r));
1686 }
1687 ind = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), ind, bld.mkImm(7));
1688 ind = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(), ind, bld.mkImm(6));
1689 }
1690
1691 // calculate clamped coordinates
1692 for (c = 0; c < arg; ++c) {
1693 int dimc = c;
1694
1695 if (c == 1 && su->tex.target == TEX_TARGET_1D_ARRAY) {
1696 // The array index is stored in the Z component for 1D arrays.
1697 dimc = 2;
1698 }
1699
1700 src[c] = bld.getScratch();
1701 if (c == 0 && raw)
1702 v = loadSuInfo32(ind, base + NVE4_SU_INFO_RAW_X);
1703 else
1704 v = loadSuInfo32(ind, base + NVE4_SU_INFO_DIM(dimc));
1705 bld.mkOp3(OP_SUCLAMP, TYPE_S32, src[c], su->getSrc(c), v, zero)
1706 ->subOp = getSuClampSubOp(su, dimc);
1707 }
1708 for (; c < 3; ++c)
1709 src[c] = zero;
1710
1711 // set predicate output
1712 if (su->tex.target == TEX_TARGET_BUFFER) {
1713 src[0]->getInsn()->setFlagsDef(1, pred);
1714 } else
1715 if (su->tex.target.isArray() || su->tex.target.isCube()) {
1716 p1 = bld.getSSA(1, FILE_PREDICATE);
1717 src[dim]->getInsn()->setFlagsDef(1, p1);
1718 }
1719
1720 // calculate pixel offset
1721 if (dim == 1) {
1722 if (su->tex.target != TEX_TARGET_BUFFER)
1723 bld.mkOp2(OP_AND, TYPE_U32, off, src[0], bld.loadImm(NULL, 0xffff));
1724 } else
1725 if (dim == 3) {
1726 v = loadSuInfo32(ind, base + NVE4_SU_INFO_UNK1C);
1727 bld.mkOp3(OP_MADSP, TYPE_U32, off, src[2], v, src[1])
1728 ->subOp = NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l
1729
1730 v = loadSuInfo32(ind, base + NVE4_SU_INFO_PITCH);
1731 bld.mkOp3(OP_MADSP, TYPE_U32, off, off, v, src[0])
1732 ->subOp = NV50_IR_SUBOP_MADSP(0,2,8); // u32 u16l u16l
1733 } else {
1734 assert(dim == 2);
1735 v = loadSuInfo32(ind, base + NVE4_SU_INFO_PITCH);
1736 bld.mkOp3(OP_MADSP, TYPE_U32, off, src[1], v, src[0])
1737 ->subOp = (su->tex.target.isArray() || su->tex.target.isCube()) ?
1738 NV50_IR_SUBOP_MADSP_SD : NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l
1739 }
1740
1741 // calculate effective address part 1
1742 if (su->tex.target == TEX_TARGET_BUFFER) {
1743 if (raw) {
1744 bf = src[0];
1745 } else {
1746 v = loadSuInfo32(ind, base + NVE4_SU_INFO_FMT);
1747 bld.mkOp3(OP_VSHL, TYPE_U32, bf, src[0], v, zero)
1748 ->subOp = NV50_IR_SUBOP_V1(7,6,8|2);
1749 }
1750 } else {
1751 Value *y = src[1];
1752 Value *z = src[2];
1753 uint16_t subOp = 0;
1754
1755 switch (dim) {
1756 case 1:
1757 y = zero;
1758 z = zero;
1759 break;
1760 case 2:
1761 z = off;
1762 if (!su->tex.target.isArray() && !su->tex.target.isCube()) {
1763 z = loadSuInfo32(ind, base + NVE4_SU_INFO_UNK1C);
1764 subOp = NV50_IR_SUBOP_SUBFM_3D;
1765 }
1766 break;
1767 default:
1768 subOp = NV50_IR_SUBOP_SUBFM_3D;
1769 assert(dim == 3);
1770 break;
1771 }
1772 insn = bld.mkOp3(OP_SUBFM, TYPE_U32, bf, src[0], y, z);
1773 insn->subOp = subOp;
1774 insn->setFlagsDef(1, pred);
1775 }
1776
1777 // part 2
1778 v = loadSuInfo32(ind, base + NVE4_SU_INFO_ADDR);
1779
1780 if (su->tex.target == TEX_TARGET_BUFFER) {
1781 eau = v;
1782 } else {
1783 eau = bld.mkOp3v(OP_SUEAU, TYPE_U32, bld.getScratch(4), off, bf, v);
1784 }
1785 // add array layer offset
1786 if (su->tex.target.isArray() || su->tex.target.isCube()) {
1787 v = loadSuInfo32(ind, base + NVE4_SU_INFO_ARRAY);
1788 if (dim == 1)
1789 bld.mkOp3(OP_MADSP, TYPE_U32, eau, src[1], v, eau)
1790 ->subOp = NV50_IR_SUBOP_MADSP(4,0,0); // u16 u24 u32
1791 else
1792 bld.mkOp3(OP_MADSP, TYPE_U32, eau, v, src[2], eau)
1793 ->subOp = NV50_IR_SUBOP_MADSP(0,0,0); // u32 u24 u32
1794 // combine predicates
1795 assert(p1);
1796 bld.mkOp2(OP_OR, TYPE_U8, pred, pred, p1);
1797 }
1798
1799 if (atom) {
1800 Value *lo = bf;
1801 if (su->tex.target == TEX_TARGET_BUFFER) {
1802 lo = zero;
1803 bld.mkMov(off, bf);
1804 }
1805 // bf == g[] address & 0xff
1806 // eau == g[] address >> 8
1807 bld.mkOp3(OP_PERMT, TYPE_U32, bf, lo, bld.loadImm(NULL, 0x6540), eau);
1808 bld.mkOp3(OP_PERMT, TYPE_U32, eau, zero, bld.loadImm(NULL, 0x0007), eau);
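      // Assuming OP_PERMT selects result bytes like PTX's prmt (each selector
      // nibble picks one byte, values 0-3 from the first source, 4-7 from the
      // last), the two ops above compute:
      //   0x6540: bf  = (eau << 8) | (lo & 0xff)   // low 32 bits of g[] addr
      //   0x0007: eau = eau >> 24                  // high bits of g[] addr
      // so the merged 64-bit address below equals (old_eau << 8) | low_byte.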
1809 } else
1810 if (su->op == OP_SULDP && su->tex.target == TEX_TARGET_BUFFER) {
1811       // Convert from u32 to u8 address format, which is what the library
1812       // code that performs SULDP currently uses.
1813       // XXX: can SUEAU do this?
1814       // XXX: does it matter that we don't mask high bytes in bf?
1815 // Grrr.
1816 bld.mkOp2(OP_SHR, TYPE_U32, off, bf, bld.mkImm(8));
1817 bld.mkOp2(OP_ADD, TYPE_U32, eau, eau, off);
1818 }
1819
1820 bld.mkOp2(OP_MERGE, TYPE_U64, addr, bf, eau);
1821
1822 if (atom && su->tex.target == TEX_TARGET_BUFFER)
1823 bld.mkOp2(OP_ADD, TYPE_U64, addr, addr, off);
1824
1825    // let's just set it to 0 for raw access and hope it works
1826 v = raw ?
1827 bld.mkImm(0) : loadSuInfo32(ind, base + NVE4_SU_INFO_FMT);
1828
1829 // get rid of old coordinate sources, make space for fmt info and predicate
1830 su->moveSources(arg, 3 - arg);
1831    // set 64-bit address and 32-bit format sources
1832 su->setSrc(0, addr);
1833 su->setSrc(1, v);
1834 su->setSrc(2, pred);
1835
1836 // prevent read fault when the image is not actually bound
1837 CmpInstruction *pred1 =
1838 bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
1839 TYPE_U32, bld.mkImm(0),
1840 loadSuInfo32(ind, base + NVE4_SU_INFO_ADDR));
1841
1842 if (su->op != OP_SUSTP && su->tex.format) {
1843 const TexInstruction::ImgFormatDesc *format = su->tex.format;
1844 int blockwidth = format->bits[0] + format->bits[1] +
1845 format->bits[2] + format->bits[3];
1846
1847       // make sure that the format's block size matches the bound surface's
1848 assert(format->components != 0);
1849 bld.mkCmp(OP_SET_OR, CC_NE, TYPE_U32, pred1->getDef(0),
1850 TYPE_U32, bld.loadImm(NULL, blockwidth / 8),
1851 loadSuInfo32(ind, base + NVE4_SU_INFO_BSIZE),
1852 pred1->getDef(0));
1853 }
1854 su->setPredicate(CC_NOT_P, pred1->getDef(0));
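   // Net effect (informal): pred1 = (info.ADDR == 0) || (a format is known
   // && info.BSIZE != blockwidth / 8), and the surface op only executes
   // under CC_NOT_P, so accesses to unbound or mismatched images become
   // no-ops instead of faulting.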
1855
1856    // TODO: initialize def values to 0 when the surface operation is not
1857    // performed (not needed for stores). Also, fix the "address bounds test"
1858    // subtests from arb_shader_image_load_store-invalid for buffers, because
1859    // it seems that the predicate is not correctly set by suclamp.
1860 }
1861
1862 static DataType
1863 getSrcType(const TexInstruction::ImgFormatDesc *t, int c)
1864 {
1865 switch (t->type) {
1866 case FLOAT: return t->bits[c] == 16 ? TYPE_F16 : TYPE_F32;
1867 case UNORM: return t->bits[c] == 8 ? TYPE_U8 : TYPE_U16;
1868 case SNORM: return t->bits[c] == 8 ? TYPE_S8 : TYPE_S16;
1869 case UINT:
1870 return (t->bits[c] == 8 ? TYPE_U8 :
1871 (t->bits[c] == 16 ? TYPE_U16 : TYPE_U32));
1872 case SINT:
1873 return (t->bits[c] == 8 ? TYPE_S8 :
1874 (t->bits[c] == 16 ? TYPE_S16 : TYPE_S32));
1875 }
1876 return TYPE_NONE;
1877 }
1878
1879 static DataType
1880 getDestType(const ImgType type) {
1881 switch (type) {
1882 case FLOAT:
1883 case UNORM:
1884 case SNORM:
1885 return TYPE_F32;
1886 case UINT:
1887 return TYPE_U32;
1888 case SINT:
1889 return TYPE_S32;
1890 default:
1891 assert(!"Impossible type");
1892 return TYPE_NONE;
1893 }
1894 }
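// Example of how these helpers combine (illustrative): for an RGBA8 UNORM
// image, getSrcType() yields TYPE_U8 per component and getDestType() yields
// TYPE_F32, so the unpacking in convertSurfaceFormat below becomes U8 -> F32
// conversions followed by a 1/255 normalization.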
1895
1896 void
1897 NVC0LoweringPass::convertSurfaceFormat(TexInstruction *su)
1898 {
1899 const TexInstruction::ImgFormatDesc *format = su->tex.format;
1900 int width = format->bits[0] + format->bits[1] +
1901 format->bits[2] + format->bits[3];
1902 Value *untypedDst[4] = {};
1903 Value *typedDst[4] = {};
1904
1905 // We must convert this to a generic load.
1906 su->op = OP_SULDB;
1907
1908 su->dType = typeOfSize(width / 8);
1909 su->sType = TYPE_U8;
1910
1911 for (int i = 0; i < width / 32; i++)
1912 untypedDst[i] = bld.getSSA();
1913 if (width < 32)
1914 untypedDst[0] = bld.getSSA();
1915
1916 for (int i = 0; i < 4; i++) {
1917 typedDst[i] = su->getDef(i);
1918 }
1919
1920 // Set the untyped dsts as the su's destinations
1921 for (int i = 0; i < 4; i++)
1922 su->setDef(i, untypedDst[i]);
1923
1924 bld.setPosition(su, true);
1925
1926 // Unpack each component into the typed dsts
1927 int bits = 0;
1928 for (int i = 0; i < 4; bits += format->bits[i], i++) {
1929 if (!typedDst[i])
1930 continue;
1931 if (i >= format->components) {
1932 if (format->type == FLOAT ||
1933 format->type == UNORM ||
1934 format->type == SNORM)
1935 bld.loadImm(typedDst[i], i == 3 ? 1.0f : 0.0f);
1936 else
1937 bld.loadImm(typedDst[i], i == 3 ? 1 : 0);
1938 continue;
1939 }
1940
1941 // Get just that component's data into the relevant place
1942 if (format->bits[i] == 32)
1943 bld.mkMov(typedDst[i], untypedDst[i]);
1944 else if (format->bits[i] == 16)
1945 bld.mkCvt(OP_CVT, getDestType(format->type), typedDst[i],
1946 getSrcType(format, i), untypedDst[i / 2])
1947 ->subOp = (i & 1) << (format->type == FLOAT ? 0 : 1);
1948 else if (format->bits[i] == 8)
1949 bld.mkCvt(OP_CVT, getDestType(format->type), typedDst[i],
1950 getSrcType(format, i), untypedDst[0])->subOp = i;
1951 else {
1952 bld.mkOp2(OP_EXTBF, TYPE_U32, typedDst[i], untypedDst[bits / 32],
1953 bld.mkImm((bits % 32) | (format->bits[i] << 8)));
1954 if (format->type == UNORM || format->type == SNORM)
1955 bld.mkCvt(OP_CVT, TYPE_F32, typedDst[i], getSrcType(format, i), typedDst[i]);
1956 }
1957
1958 // Normalize / convert as necessary
1959 if (format->type == UNORM)
1960 bld.mkOp2(OP_MUL, TYPE_F32, typedDst[i], typedDst[i], bld.loadImm(NULL, 1.0f / ((1 << format->bits[i]) - 1)));
1961 else if (format->type == SNORM)
1962 bld.mkOp2(OP_MUL, TYPE_F32, typedDst[i], typedDst[i], bld.loadImm(NULL, 1.0f / ((1 << (format->bits[i] - 1)) - 1)));
1963 else if (format->type == FLOAT && format->bits[i] < 16) {
1964 bld.mkOp2(OP_SHL, TYPE_U32, typedDst[i], typedDst[i], bld.loadImm(NULL, 15 - format->bits[i]));
1965 bld.mkCvt(OP_CVT, TYPE_F32, typedDst[i], TYPE_F16, typedDst[i]);
1966 }
1967 }
1968 }
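// Worked example for convertSurfaceFormat above (a sketch, not
// authoritative): an OP_SULDP from an RGBA8 UNORM image has width == 32, so
// the rewritten OP_SULDB loads one 32-bit word into untypedDst[0];
// component i is extracted by a CVT with subOp == i (byte select) and then
// scaled by 1.0f / 255. For sub-16-bit floats (e.g. an 11-bit 5e6m
// component), the SHL by 15 - bits aligns exponent and mantissa with the
// F16 layout (1s/5e/10m, sign zero), after which a regular F16 -> F32 CVT
// finishes the load.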
1969
1970 void
1971 NVC0LoweringPass::handleSurfaceOpNVE4(TexInstruction *su)
1972 {
1973 processSurfaceCoordsNVE4(su);
1974
1975 if (su->op == OP_SULDP)
1976 convertSurfaceFormat(su);
1977
1978 if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
1979 Value *pred = su->getSrc(2);
1980 CondCode cc = CC_NOT_P;
1981 if (su->getPredicate()) {
1982 pred = bld.getScratch(1, FILE_PREDICATE);
1983 cc = su->cc;
1984 if (cc == CC_NOT_P) {
1985 bld.mkOp2(OP_OR, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
1986 } else {
1987 bld.mkOp2(OP_AND, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
1988 pred->getInsn()->src(1).mod = Modifier(NV50_IR_MOD_NOT);
1989 }
1990 }
1991 Instruction *red = bld.mkOp(OP_ATOM, su->dType, bld.getSSA());
1992 red->subOp = su->subOp;
1993 if (!gMemBase)
1994 gMemBase = bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0);
1995 red->setSrc(0, gMemBase);
1996 red->setSrc(1, su->getSrc(3));
1997 if (su->subOp == NV50_IR_SUBOP_ATOM_CAS)
1998 red->setSrc(2, su->getSrc(4));
1999 red->setIndirect(0, 0, su->getSrc(0));
2000
2001 // make sure to initialize dst value when the atomic operation is not
2002 // performed
2003 Instruction *mov = bld.mkMov(bld.getSSA(), bld.loadImm(NULL, 0));
2004
2005 assert(cc == CC_NOT_P);
2006 red->setPredicate(cc, pred);
2007 mov->setPredicate(CC_P, pred);
2008
2009 bld.mkOp2(OP_UNION, TYPE_U32, su->getDef(0),
2010 red->getDef(0), mov->getDef(0));
2011
2012 delete_Instruction(bld.getProgram(), su);
2013 handleCasExch(red, true);
2014 }
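   // Shape of the reduction lowering above (informal): the OP_ATOM runs only
   // under CC_NOT_P(pred), the MOV supplies 0 under CC_P(pred), and OP_UNION
   // merges the two into the single def the rest of the program sees, i.e.
   //   def = bound ? atom(g[addr], data) : 0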
2015
2016 if (su->op == OP_SUSTB || su->op == OP_SUSTP)
2017 su->sType = (su->tex.target == TEX_TARGET_BUFFER) ? TYPE_U32 : TYPE_U8;
2018 }
2019
2020 void
2021 NVC0LoweringPass::processSurfaceCoordsNVC0(TexInstruction *su)
2022 {
2023 const int idx = su->tex.r;
2024 const int dim = su->tex.target.getDim();
2025 const int arg = dim + (su->tex.target.isArray() || su->tex.target.isCube());
2026 const uint16_t base = idx * NVE4_SU_INFO__STRIDE;
2027 int c;
2028 Value *zero = bld.mkImm(0);
2029 Value *src[3];
2030 Value *v;
2031 Value *ind = NULL;
2032
2033 if (su->tex.rIndirectSrc >= 0) {
2034 ind = su->getIndirectR();
2035 if (su->tex.r > 0) {
2036 ind = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getSSA(), ind,
2037 bld.loadImm(NULL, su->tex.r));
2038 }
2039 ind = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), ind, bld.mkImm(7));
2040 ind = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(), ind, bld.mkImm(6));
2041 }
2042
2043 // get surface coordinates
2044 for (c = 0; c < arg; ++c)
2045 src[c] = su->getSrc(c);
2046 for (; c < 3; ++c)
2047 src[c] = zero;
2048
2049 // calculate pixel offset
2050 if (su->op == OP_SULDP || su->op == OP_SUREDP) {
2051 v = loadSuInfo32(ind, base + NVE4_SU_INFO_BSIZE);
2052 su->setSrc(0, bld.mkOp2v(OP_MUL, TYPE_U32, bld.getSSA(), src[0], v));
2053 }
2054
2055 // add array layer offset
2056 if (su->tex.target.isArray() || su->tex.target.isCube()) {
2057 v = loadSuInfo32(ind, base + NVE4_SU_INFO_ARRAY);
2058 assert(dim > 1);
2059 su->setSrc(2, bld.mkOp2v(OP_MUL, TYPE_U32, bld.getSSA(), src[2], v));
2060 }
2061
2062 // prevent read fault when the image is not actually bound
2063 CmpInstruction *pred =
2064 bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
2065 TYPE_U32, bld.mkImm(0),
2066 loadSuInfo32(ind, base + NVE4_SU_INFO_ADDR));
2067 if (su->op != OP_SUSTP && su->tex.format) {
2068 const TexInstruction::ImgFormatDesc *format = su->tex.format;
2069 int blockwidth = format->bits[0] + format->bits[1] +
2070 format->bits[2] + format->bits[3];
2071
2072 assert(format->components != 0);
2073       // make sure that the format's block size matches when it's not FMT_NONE
2074 bld.mkCmp(OP_SET_OR, CC_NE, TYPE_U32, pred->getDef(0),
2075 TYPE_U32, bld.loadImm(NULL, blockwidth / 8),
2076 loadSuInfo32(ind, base + NVE4_SU_INFO_BSIZE),
2077 pred->getDef(0));
2078 }
2079 su->setPredicate(CC_NOT_P, pred->getDef(0));
2080 }
2081
2082 void
2083 NVC0LoweringPass::handleSurfaceOpNVC0(TexInstruction *su)
2084 {
2085 if (su->tex.target == TEX_TARGET_1D_ARRAY) {
2086 /* As 1d arrays also need 3 coordinates, switching to TEX_TARGET_2D_ARRAY
2087 * will simplify the lowering pass and the texture constraints. */
2088 su->moveSources(1, 1);
2089 su->setSrc(1, bld.loadImm(NULL, 0));
2090 su->tex.target = TEX_TARGET_2D_ARRAY;
2091 }
2092
2093 processSurfaceCoordsNVC0(su);
2094
2095 if (su->op == OP_SULDP)
2096 convertSurfaceFormat(su);
2097
2098 if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
2099 const int dim = su->tex.target.getDim();
2100 const int arg = dim + (su->tex.target.isArray() || su->tex.target.isCube());
2101 LValue *addr = bld.getSSA(8);
2102 Value *def = su->getDef(0);
2103
2104 su->op = OP_SULEA;
2105
2106 // Set the destination to the address
2107 su->dType = TYPE_U64;
2108 su->setDef(0, addr);
2109 su->setDef(1, su->getPredicate());
2110
2111 bld.setPosition(su, true);
2112
2113 // Perform the atomic op
2114 Instruction *red = bld.mkOp(OP_ATOM, su->sType, bld.getSSA());
2115 red->subOp = su->subOp;
2116 red->setSrc(0, bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, su->sType, 0));
2117 red->setSrc(1, su->getSrc(arg));
2118 if (red->subOp == NV50_IR_SUBOP_ATOM_CAS)
2119 red->setSrc(2, su->getSrc(arg + 1));
2120 red->setIndirect(0, 0, addr);
2121
2122 // make sure to initialize dst value when the atomic operation is not
2123 // performed
2124 Instruction *mov = bld.mkMov(bld.getSSA(), bld.loadImm(NULL, 0));
2125
2126 assert(su->cc == CC_NOT_P);
2127 red->setPredicate(su->cc, su->getPredicate());
2128 mov->setPredicate(CC_P, su->getPredicate());
2129
2130 bld.mkOp2(OP_UNION, TYPE_U32, def, red->getDef(0), mov->getDef(0));
2131
2132 handleCasExch(red, false);
2133 }
2134 }
2135
2136 bool
2137 NVC0LoweringPass::handleWRSV(Instruction *i)
2138 {
2139 Instruction *st;
2140 Symbol *sym;
2141 uint32_t addr;
2142
2143    // must replace: $sregs are not writeable
2144 addr = targ->getSVAddress(FILE_SHADER_OUTPUT, i->getSrc(0)->asSym());
2145 if (addr >= 0x400)
2146 return false;
2147 sym = bld.mkSymbol(FILE_SHADER_OUTPUT, 0, i->sType, addr);
2148
2149 st = bld.mkStore(OP_EXPORT, i->dType, sym, i->getIndirect(0, 0),
2150 i->getSrc(1));
2151 st->perPatch = i->perPatch;
2152
2153 bld.getBB()->remove(i);
2154 return true;
2155 }
2156
2157 void
2158 NVC0LoweringPass::handleLDST(Instruction *i)
2159 {
2160 if (i->src(0).getFile() == FILE_SHADER_INPUT) {
2161 if (prog->getType() == Program::TYPE_COMPUTE) {
2162 i->getSrc(0)->reg.file = FILE_MEMORY_CONST;
2163 i->getSrc(0)->reg.fileIndex = 0;
2164 } else
2165 if (prog->getType() == Program::TYPE_GEOMETRY &&
2166 i->src(0).isIndirect(0)) {
2167 // XXX: this assumes vec4 units
2168 Value *ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
2169 i->getIndirect(0, 0), bld.mkImm(4));
2170 i->setIndirect(0, 0, ptr);
2171 i->op = OP_VFETCH;
2172 } else {
2173 i->op = OP_VFETCH;
2174 assert(prog->getType() != Program::TYPE_FRAGMENT); // INTERP
2175 }
2176 } else if (i->src(0).getFile() == FILE_MEMORY_CONST) {
2177 if (targ->getChipset() >= NVISA_GK104_CHIPSET &&
2178 prog->getType() == Program::TYPE_COMPUTE) {
2179          // The launch descriptor only allows setting up 8 CBs, but OpenGL
2180          // requires at least 12 UBOs. To bypass this limitation, we store the
2181          // UBO addresses in the driver constbuf and load directly from
2182          // global memory.
2183 int8_t fileIndex = i->getSrc(0)->reg.fileIndex - 1;
2184 Value *ind = i->getIndirect(0, 1);
2185
2186 if (ind) {
2187 // Clamp the UBO index when an indirect access is used to avoid
2188 // loading information from the wrong place in the driver cb.
2189 ind = bld.mkOp2v(OP_MIN, TYPE_U32, ind,
2190 bld.mkOp2v(OP_ADD, TYPE_U32, bld.getSSA(),
2191 ind, bld.loadImm(NULL, fileIndex)),
2192 bld.loadImm(NULL, 12));
2193 }
2194
2195 if (i->src(0).isIndirect(1)) {
2196 Value *offset = bld.loadImm(NULL, i->getSrc(0)->reg.data.offset + typeSizeof(i->sType));
2197 Value *ptr = loadUboInfo64(ind, fileIndex * 16);
2198 Value *length = loadUboLength32(ind, fileIndex * 16);
2199 Value *pred = new_LValue(func, FILE_PREDICATE);
2200 if (i->src(0).isIndirect(0)) {
2201 bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
2202 bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, i->getIndirect(0, 0));
2203 }
2204 i->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
2205 i->setIndirect(0, 1, NULL);
2206 i->setIndirect(0, 0, ptr);
2207 bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
2208 i->setPredicate(CC_NOT_P, pred);
2209 if (i->defExists(0)) {
2210 bld.mkMov(i->getDef(0), bld.mkImm(0));
2211 }
2212 } else if (fileIndex >= 0) {
2213 Value *ptr = loadUboInfo64(ind, fileIndex * 16);
2214 if (i->src(0).isIndirect(0)) {
2215 bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
2216 }
2217 i->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
2218 i->setIndirect(0, 1, NULL);
2219 i->setIndirect(0, 0, ptr);
2220 }
2221 } else if (i->src(0).isIndirect(1)) {
2222 Value *ptr;
2223 if (i->src(0).isIndirect(0))
2224 ptr = bld.mkOp3v(OP_INSBF, TYPE_U32, bld.getSSA(),
2225 i->getIndirect(0, 1), bld.mkImm(0x1010),
2226 i->getIndirect(0, 0));
2227 else
2228 ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
2229 i->getIndirect(0, 1), bld.mkImm(16));
2230 i->setIndirect(0, 1, NULL);
2231 i->setIndirect(0, 0, ptr);
2232 i->subOp = NV50_IR_SUBOP_LDC_IS;
2233 }
2234 } else if (i->src(0).getFile() == FILE_SHADER_OUTPUT) {
2235 assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
2236 i->op = OP_VFETCH;
2237 } else if (i->src(0).getFile() == FILE_MEMORY_BUFFER) {
2238 Value *ind = i->getIndirect(0, 1);
2239 Value *ptr = loadBufInfo64(ind, i->getSrc(0)->reg.fileIndex * 16);
2240 // XXX come up with a way not to do this for EVERY little access but
2241 // rather to batch these up somehow. Unfortunately we've lost the
2242 // information about the field width by the time we get here.
2243 Value *offset = bld.loadImm(NULL, i->getSrc(0)->reg.data.offset + typeSizeof(i->sType));
2244 Value *length = loadBufLength32(ind, i->getSrc(0)->reg.fileIndex * 16);
2245 Value *pred = new_LValue(func, FILE_PREDICATE);
2246 if (i->src(0).isIndirect(0)) {
2247 bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
2248 bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, i->getIndirect(0, 0));
2249 }
2250 i->setIndirect(0, 1, NULL);
2251 i->setIndirect(0, 0, ptr);
2252 i->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
2253 bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
2254 i->setPredicate(CC_NOT_P, pred);
2255 if (i->defExists(0)) {
2256 Value *zero, *dst = i->getDef(0);
2257 i->setDef(0, bld.getSSA());
2258
2259 bld.setPosition(i, true);
2260 bld.mkMov((zero = bld.getSSA()), bld.mkImm(0))
2261 ->setPredicate(CC_P, pred);
2262 bld.mkOp2(OP_UNION, TYPE_U32, dst, i->getDef(0), zero);
2263 }
2264 }
2265 }
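// Two details of handleLDST above, stated informally:
// - In the non-compute indirect-constbuf path, OP_INSBF with immediate
//   0x1010 inserts a 16-bit field at bit 16, i.e.
//     ptr = (cb_index << 16) | (offset & 0xffff)
//   which is the c[][] address split consumed by NV50_IR_SUBOP_LDC_IS.
// - The UBO/SSBO bounds checks compare the end of the access (register
//   offset + typeSizeof(sType), plus any indirect part) against the stored
//   length; the load runs under CC_NOT_P and a predicated 0 is unioned in,
//   so out-of-bounds reads return 0 instead of faulting.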
2266
2267 void
2268 NVC0LoweringPass::readTessCoord(LValue *dst, int c)
2269 {
2270 Value *laneid = bld.getSSA();
2271 Value *x, *y;
2272
2273 bld.mkOp1(OP_RDSV, TYPE_U32, laneid, bld.mkSysVal(SV_LANEID, 0));
2274
2275 if (c == 0) {
2276 x = dst;
2277 y = NULL;
2278 } else
2279 if (c == 1) {
2280 x = NULL;
2281 y = dst;
2282 } else {
2283 assert(c == 2);
2284 if (prog->driver->prop.tp.domain != PIPE_PRIM_TRIANGLES) {
2285 bld.mkMov(dst, bld.loadImm(NULL, 0));
2286 return;
2287 }
2288 x = bld.getSSA();
2289 y = bld.getSSA();
2290 }
2291 if (x)
2292 bld.mkFetch(x, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f0, NULL, laneid);
2293 if (y)
2294 bld.mkFetch(y, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f4, NULL, laneid);
2295
2296 if (c == 2) {
2297 bld.mkOp2(OP_ADD, TYPE_F32, dst, x, y);
2298 bld.mkOp2(OP_SUB, TYPE_F32, dst, bld.loadImm(NULL, 1.0f), dst);
2299 }
2300 }
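// I.e. readTessCoord reconstructs the third coordinate for triangle domains
// as w = 1.0f - u - v (the ADD/SUB pair above); quad and isoline domains
// just get 0.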
2301
2302 bool
2303 NVC0LoweringPass::handleRDSV(Instruction *i)
2304 {
2305 Symbol *sym = i->getSrc(0)->asSym();
2306 const SVSemantic sv = sym->reg.data.sv.sv;
2307 Value *vtx = NULL;
2308 Instruction *ld;
2309 uint32_t addr = targ->getSVAddress(FILE_SHADER_INPUT, sym);
2310
2311 if (addr >= 0x400) {
2312 // mov $sreg
2313 if (sym->reg.data.sv.index == 3) {
2314 // TGSI backend may use 4th component of TID,NTID,CTAID,NCTAID
2315 i->op = OP_MOV;
2316 i->setSrc(0, bld.mkImm((sv == SV_NTID || sv == SV_NCTAID) ? 1 : 0));
2317 }
2318 if (sv == SV_VERTEX_COUNT) {
2319 bld.setPosition(i, true);
2320 bld.mkOp2(OP_EXTBF, TYPE_U32, i->getDef(0), i->getDef(0), bld.mkImm(0x808));
2321 }
2322 return true;
2323 }
2324
2325 switch (sv) {
2326 case SV_POSITION:
2327 assert(prog->getType() == Program::TYPE_FRAGMENT);
2328 if (i->srcExists(1)) {
2329 // Pass offset through to the interpolation logic
2330 ld = bld.mkInterp(NV50_IR_INTERP_LINEAR | NV50_IR_INTERP_OFFSET,
2331 i->getDef(0), addr, NULL);
2332 ld->setSrc(1, i->getSrc(1));
2333 } else {
2334 bld.mkInterp(NV50_IR_INTERP_LINEAR, i->getDef(0), addr, NULL);
2335 }
2336 break;
2337 case SV_FACE:
2338 {
2339 Value *face = i->getDef(0);
2340 bld.mkInterp(NV50_IR_INTERP_FLAT, face, addr, NULL);
2341 if (i->dType == TYPE_F32) {
2342 bld.mkOp2(OP_OR, TYPE_U32, face, face, bld.mkImm(0x00000001));
2343 bld.mkOp1(OP_NEG, TYPE_S32, face, face);
2344 bld.mkCvt(OP_CVT, TYPE_F32, face, TYPE_S32, face);
2345 }
2346 }
2347 break;
2348 case SV_TESS_COORD:
2349 assert(prog->getType() == Program::TYPE_TESSELLATION_EVAL);
2350 readTessCoord(i->getDef(0)->asLValue(), i->getSrc(0)->reg.data.sv.index);
2351 break;
2352 case SV_NTID:
2353 case SV_NCTAID:
2354 case SV_GRIDID:
2355 assert(targ->getChipset() >= NVISA_GK104_CHIPSET); // mov $sreg otherwise
2356 if (sym->reg.data.sv.index == 3) {
2357 i->op = OP_MOV;
2358 i->setSrc(0, bld.mkImm(sv == SV_GRIDID ? 0 : 1));
2359 return true;
2360 }
2361 addr += prog->driver->prop.cp.gridInfoBase;
2362 bld.mkLoad(TYPE_U32, i->getDef(0),
2363 bld.mkSymbol(FILE_MEMORY_CONST, prog->driver->io.auxCBSlot,
2364 TYPE_U32, addr), NULL);
2365 break;
2366 case SV_SAMPLE_INDEX:
2367 // TODO: Properly pass source as an address in the PIX address space
2368 // (which can be of the form [r0+offset]). But this is currently
2369 // unnecessary.
2370 ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
2371 ld->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
2372 break;
2373 case SV_SAMPLE_POS: {
2374 Value *off = new_LValue(func, FILE_GPR);
2375 ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
2376 ld->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
2377 bld.mkOp2(OP_SHL, TYPE_U32, off, i->getDef(0), bld.mkImm(3));
2378 bld.mkLoad(TYPE_F32,
2379 i->getDef(0),
2380 bld.mkSymbol(
2381 FILE_MEMORY_CONST, prog->driver->io.auxCBSlot,
2382 TYPE_U32, prog->driver->io.sampleInfoBase +
2383 4 * sym->reg.data.sv.index),
2384 off);
2385 break;
2386 }
2387 case SV_SAMPLE_MASK: {
2388 ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
2389 ld->subOp = NV50_IR_SUBOP_PIXLD_COVMASK;
2390 Instruction *sampleid =
2391 bld.mkOp1(OP_PIXLD, TYPE_U32, bld.getSSA(), bld.mkImm(0));
2392 sampleid->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
2393 Value *masked =
2394 bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), ld->getDef(0),
2395 bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
2396 bld.loadImm(NULL, 1), sampleid->getDef(0)));
2397 if (prog->driver->prop.fp.persampleInvocation) {
2398 bld.mkMov(i->getDef(0), masked);
2399 } else {
2400 bld.mkOp3(OP_SELP, TYPE_U32, i->getDef(0), ld->getDef(0), masked,
2401 bld.mkImm(0))
2402 ->subOp = 1;
2403 }
2404 break;
2405 }
2406 case SV_BASEVERTEX:
2407 case SV_BASEINSTANCE:
2408 case SV_DRAWID:
2409 ld = bld.mkLoad(TYPE_U32, i->getDef(0),
2410 bld.mkSymbol(FILE_MEMORY_CONST,
2411 prog->driver->io.auxCBSlot,
2412 TYPE_U32,
2413 prog->driver->io.drawInfoBase +
2414 4 * (sv - SV_BASEVERTEX)),
2415 NULL);
2416 break;
2417 default:
2418 if (prog->getType() == Program::TYPE_TESSELLATION_EVAL && !i->perPatch)
2419 vtx = bld.mkOp1v(OP_PFETCH, TYPE_U32, bld.getSSA(), bld.mkImm(0));
2420 ld = bld.mkFetch(i->getDef(0), i->dType,
2421 FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
2422 ld->perPatch = i->perPatch;
2423 break;
2424 }
2425 bld.getBB()->remove(i);
2426 return true;
2427 }
2428
2429 bool
2430 NVC0LoweringPass::handleDIV(Instruction *i)
2431 {
2432 if (!isFloatType(i->dType))
2433 return true;
2434 bld.setPosition(i, false);
2435 Instruction *rcp = bld.mkOp1(OP_RCP, i->dType, bld.getSSA(typeSizeof(i->dType)), i->getSrc(1));
2436 i->op = OP_MUL;
2437 i->setSrc(1, rcp->getDef(0));
2438 return true;
2439 }
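// handleDIV above rewrites a floating-point division as reciprocal plus
// multiply:
//   a / b  ->  a * rcp(b)
// (a sketch of intent; precision is whatever OP_RCP delivers, not an
// IEEE-correct divide).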
2440
2441 bool
2442 NVC0LoweringPass::handleMOD(Instruction *i)
2443 {
2444 if (!isFloatType(i->dType))
2445 return true;
2446 LValue *value = bld.getScratch(typeSizeof(i->dType));
2447 bld.mkOp1(OP_RCP, i->dType, value, i->getSrc(1));
2448 bld.mkOp2(OP_MUL, i->dType, value, i->getSrc(0), value);
2449 bld.mkOp1(OP_TRUNC, i->dType, value, value);
2450 bld.mkOp2(OP_MUL, i->dType, value, i->getSrc(1), value);
2451 i->op = OP_SUB;
2452 i->setSrc(1, value);
2453 return true;
2454 }
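// Worked example for handleMOD above (illustrative): mod(7.5f, 2.0f) becomes
//   7.5 - 2.0 * trunc(7.5 * rcp(2.0)) = 7.5 - 2.0 * 3.0 = 1.5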
2455
2456 bool
2457 NVC0LoweringPass::handleSQRT(Instruction *i)
2458 {
2459 if (i->dType == TYPE_F64) {
2460 Value *pred = bld.getSSA(1, FILE_PREDICATE);
2461 Value *zero = bld.loadImm(NULL, 0.0);
2462 Value *dst = bld.getSSA(8);
2463 bld.mkOp1(OP_RSQ, i->dType, dst, i->getSrc(0));
2464 bld.mkCmp(OP_SET, CC_LE, i->dType, pred, i->dType, i->getSrc(0), zero);
2465 bld.mkOp3(OP_SELP, TYPE_U64, dst, zero, dst, pred);
2466 i->op = OP_MUL;
2467 i->setSrc(1, dst);
2468 // TODO: Handle this properly with a library function
2469 } else {
2470 bld.setPosition(i, true);
2471 i->op = OP_RSQ;
2472 bld.mkOp1(OP_RCP, i->dType, i->getDef(0), i->getDef(0));
2473 }
2474
2475 return true;
2476 }
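// Worked example for handleSQRT above (illustrative), f64 path:
//   sqrt(4.0) = 4.0 * rsq(4.0) = 4.0 * 0.5 = 2.0
// with the SELP feeding 0 into the multiply for inputs <= 0, where rsq
// would produce inf/NaN; the f32 path computes rcp(rsq(x)) instead.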
2477
2478 bool
2479 NVC0LoweringPass::handlePOW(Instruction *i)
2480 {
2481 LValue *val = bld.getScratch();
2482
2483 bld.mkOp1(OP_LG2, TYPE_F32, val, i->getSrc(0));
2484 bld.mkOp2(OP_MUL, TYPE_F32, val, i->getSrc(1), val)->dnz = 1;
2485 bld.mkOp1(OP_PREEX2, TYPE_F32, val, val);
2486
2487 i->op = OP_EX2;
2488 i->setSrc(0, val);
2489 i->setSrc(1, NULL);
2490
2491 return true;
2492 }
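// handlePOW above implements pow(x, y) = ex2(y * lg2(x)); e.g.
// pow(2.0f, 10.0f): lg2(2) = 1, 1 * 10 = 10, ex2(10) = 1024. The PREEX2
// mirrors the PREEX2+EX2 pair emitted for plain OP_EX2 in visit() below.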
2493
2494 bool
2495 NVC0LoweringPass::handleEXPORT(Instruction *i)
2496 {
2497 if (prog->getType() == Program::TYPE_FRAGMENT) {
2498 int id = i->getSrc(0)->reg.data.offset / 4;
2499
2500 if (i->src(0).isIndirect(0)) // TODO, ugly
2501 return false;
2502 i->op = OP_MOV;
2503 i->subOp = NV50_IR_SUBOP_MOV_FINAL;
2504 i->src(0).set(i->src(1));
2505 i->setSrc(1, NULL);
2506 i->setDef(0, new_LValue(func, FILE_GPR));
2507 i->getDef(0)->reg.data.id = id;
2508
2509 prog->maxGPR = MAX2(prog->maxGPR, id);
2510 } else
2511 if (prog->getType() == Program::TYPE_GEOMETRY) {
2512 i->setIndirect(0, 1, gpEmitAddress);
2513 }
2514 return true;
2515 }
2516
2517 bool
2518 NVC0LoweringPass::handleOUT(Instruction *i)
2519 {
2520 Instruction *prev = i->prev;
2521 ImmediateValue stream, prevStream;
2522
2523 // Only merge if the stream ids match. Also, note that the previous
2524 // instruction would have already been lowered, so we take arg1 from it.
2525 if (i->op == OP_RESTART && prev && prev->op == OP_EMIT &&
2526 i->src(0).getImmediate(stream) &&
2527 prev->src(1).getImmediate(prevStream) &&
2528 stream.reg.data.u32 == prevStream.reg.data.u32) {
2529 i->prev->subOp = NV50_IR_SUBOP_EMIT_RESTART;
2530 delete_Instruction(prog, i);
2531 } else {
2532 assert(gpEmitAddress);
2533 i->setDef(0, gpEmitAddress);
2534 i->setSrc(1, i->getSrc(0));
2535 i->setSrc(0, gpEmitAddress);
2536 }
2537 return true;
2538 }
2539
2540 // Generate a binary predicate if an instruction is predicated by something
2541 // other than a predicate register, e.g. an f32 value.
2542 void
2543 NVC0LoweringPass::checkPredicate(Instruction *insn)
2544 {
2545 Value *pred = insn->getPredicate();
2546 Value *pdst;
2547
2548 if (!pred || pred->reg.file == FILE_PREDICATE)
2549 return;
2550 pdst = new_LValue(func, FILE_PREDICATE);
2551
2552 // CAUTION: don't use pdst->getInsn, the definition might not be unique,
2553 // delay turning PSET(FSET(x,y),0) into PSET(x,y) to a later pass
2554
2555 bld.mkCmp(OP_SET, CC_NEU, insn->dType, pdst, insn->dType, bld.mkImm(0), pred);
2556
2557 insn->setPredicate(insn->cc, pdst);
2558 }
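// E.g. an instruction predicated on an f32 value v is rewritten by
// checkPredicate above to be predicated on $p = (v != 0.0f), via the
// CC_NEU comparison.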
2559
2560 //
2561 // - add quadop dance for texturing
2562 // - put FP outputs in GPRs
2563 // - convert instruction sequences
2564 //
2565 bool
2566 NVC0LoweringPass::visit(Instruction *i)
2567 {
2568 bool ret = true;
2569 bld.setPosition(i, false);
2570
2571 if (i->cc != CC_ALWAYS)
2572 checkPredicate(i);
2573
2574 switch (i->op) {
2575 case OP_TEX:
2576 case OP_TXB:
2577 case OP_TXL:
2578 case OP_TXF:
2579 case OP_TXG:
2580 return handleTEX(i->asTex());
2581 case OP_TXD:
2582 return handleTXD(i->asTex());
2583 case OP_TXLQ:
2584 return handleTXLQ(i->asTex());
2585 case OP_TXQ:
2586 return handleTXQ(i->asTex());
2587 case OP_EX2:
2588 bld.mkOp1(OP_PREEX2, TYPE_F32, i->getDef(0), i->getSrc(0));
2589 i->setSrc(0, i->getDef(0));
2590 break;
2591 case OP_POW:
2592 return handlePOW(i);
2593 case OP_DIV:
2594 return handleDIV(i);
2595 case OP_MOD:
2596 return handleMOD(i);
2597 case OP_SQRT:
2598 return handleSQRT(i);
2599 case OP_EXPORT:
2600 ret = handleEXPORT(i);
2601 break;
2602 case OP_EMIT:
2603 case OP_RESTART:
2604 return handleOUT(i);
2605 case OP_RDSV:
2606 return handleRDSV(i);
2607 case OP_WRSV:
2608 return handleWRSV(i);
2609 case OP_STORE:
2610 case OP_LOAD:
2611 handleLDST(i);
2612 break;
2613 case OP_ATOM:
2614 {
2615 const bool cctl = i->src(0).getFile() == FILE_MEMORY_BUFFER;
2616 handleATOM(i);
2617 handleCasExch(i, cctl);
2618 }
2619 break;
2620 case OP_SULDB:
2621 case OP_SULDP:
2622 case OP_SUSTB:
2623 case OP_SUSTP:
2624 case OP_SUREDB:
2625 case OP_SUREDP:
2626 if (targ->getChipset() >= NVISA_GK104_CHIPSET)
2627 handleSurfaceOpNVE4(i->asTex());
2628 else
2629 handleSurfaceOpNVC0(i->asTex());
2630 break;
2631 case OP_SUQ:
2632 handleSUQ(i->asTex());
2633 break;
2634 case OP_BUFQ:
2635 handleBUFQ(i);
2636 break;
2637 default:
2638 break;
2639 }
2640
2641 /* Kepler+ has a special opcode to compute a new base address to be used
2642 * for indirect loads.
2643 */
2644 if (targ->getChipset() >= NVISA_GK104_CHIPSET && !i->perPatch &&
2645 (i->op == OP_VFETCH || i->op == OP_EXPORT) && i->src(0).isIndirect(0)) {
2646 Instruction *afetch = bld.mkOp1(OP_AFETCH, TYPE_U32, bld.getSSA(),
2647 cloneShallow(func, i->getSrc(0)));
2648 afetch->setIndirect(0, 0, i->getIndirect(0, 0));
2649 i->src(0).get()->reg.data.offset = 0;
2650 i->setIndirect(0, 0, afetch->getDef(0));
2651 }
2652
2653 return ret;
2654 }
2655
2656 bool
2657 TargetNVC0::runLegalizePass(Program *prog, CGStage stage) const
2658 {
2659 if (stage == CG_STAGE_PRE_SSA) {
2660 NVC0LoweringPass pass(prog);
2661 return pass.run(prog, false, true);
2662 } else
2663 if (stage == CG_STAGE_POST_RA) {
2664 NVC0LegalizePostRA pass(prog);
2665 return pass.run(prog, false, true);
2666 } else
2667 if (stage == CG_STAGE_SSA) {
2668 NVC0LegalizeSSA pass;
2669 return pass.run(prog, false, true);
2670 }
2671 return false;
2672 }
2673
2674 } // namespace nv50_ir