/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_build_util.h"

#include "codegen/nv50_ir_target_nvc0.h"
#include "codegen/nv50_ir_lowering_nvc0.h"
#define QUADOP(q, r, s, t)            \
   ((QOP_##q << 6) | (QOP_##r << 4) | \
    (QOP_##s << 2) | (QOP_##t << 0))
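// Illustrative note: QUADOP packs four per-lane quad-op codes into a single
// byte (assuming each QOP_* value fits in two bits): the first argument lands
// in bits 7:6 and the last in bits 1:0, e.g.
//   QUADOP(SUBR, MOV2, SUBR, MOV2) ==
//      (QOP_SUBR << 6) | (QOP_MOV2 << 4) | (QOP_SUBR << 2) | (QOP_MOV2 << 0)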
NVC0LegalizeSSA::handleDIV(Instruction *i)
   FlowInstruction *call;

   bld.setPosition(i, false);
   def[0] = bld.mkMovToReg(0, i->getSrc(0))->getDef(0);
   def[1] = bld.mkMovToReg(1, i->getSrc(1))->getDef(0);

   case TYPE_U32: builtin = NVC0_BUILTIN_DIV_U32; break;
   case TYPE_S32: builtin = NVC0_BUILTIN_DIV_S32; break;

   call = bld.mkFlow(OP_CALL, NULL, CC_ALWAYS, NULL);
   bld.mkMov(i->getDef(0), def[(i->op == OP_DIV) ? 0 : 1]);
   bld.mkClobber(FILE_GPR, (i->op == OP_DIV) ? 0xe : 0xd, 2);
   bld.mkClobber(FILE_PREDICATE, (i->dType == TYPE_S32) ? 0xf : 0x3, 0);

   call->absolute = call->builtin = 1;
   call->target.builtin = builtin;
   delete_Instruction(prog, i);
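   // Rough summary of the lowering above: integer DIV/MOD becomes a call into
   // a driver builtin (NVC0_BUILTIN_DIV_U32/_S32). The operands are moved into
   // fixed registers, the call clobbers a small set of GPRs and predicates,
   // and the quotient (for OP_DIV) or the remainder (otherwise) is copied out
   // of the corresponding result register afterwards.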
NVC0LegalizeSSA::handleRCPRSQ(Instruction *i)
   assert(i->dType == TYPE_F64);
   // There are instructions that will compute the high 32 bits of the 64-bit
   // float. We will just stick 0 in the bottom 32 bits.

   bld.setPosition(i, false);

   // 1. Take the source and split it up.
   Value *src[2], *dst[2], *def = i->getDef(0);
   bld.mkSplit(src, 4, i->getSrc(0));

   // 2. We don't care about the low 32 bits of the destination. Stick a 0 in.
   dst[0] = bld.loadImm(NULL, 0);
   dst[1] = bld.getSSA();

   // 3. The new version of the instruction takes the high 32 bits of the
   // source and outputs the high 32 bits of the destination.
   i->subOp = NV50_IR_SUBOP_RCPRSQ_64H;

   // 4. Recombine the two dst pieces back into the original destination.
   bld.setPosition(i, true);
   bld.mkOp2(OP_MERGE, TYPE_U64, def, dst[0], dst[1]);
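   // Net effect: the double-precision RCP/RSQ is approximated by running the
   // *_64H variant on the high 32 bits only and merging the result with a
   // zero low word, so the low half of the mantissa is simply not computed.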
NVC0LegalizeSSA::handleFTZ(Instruction *i)
   // Only want to flush float inputs
   assert(i->sType == TYPE_F32);

   // If we're already flushing denorms (and NaN's) to zero, no need for this.

   // Only certain classes of operations can flush
   OpClass cls = prog->getTarget()->getOpClass(i->op);
   if (cls != OPCLASS_ARITH && cls != OPCLASS_COMPARE &&
       cls != OPCLASS_CONVERT)

NVC0LegalizeSSA::visit(Function *fn)
   bld.setProgram(fn->getProgram());

NVC0LegalizeSSA::visit(BasicBlock *bb)
   for (Instruction *i = bb->getEntry(); i; i = next) {
      if (i->sType == TYPE_F32) {
         if (prog->getType() != Program::TYPE_COMPUTE)

      if (i->dType == TYPE_F64)

NVC0LegalizePostRA::NVC0LegalizePostRA(const Program *prog)
     needTexBar(prog->getTarget()->getChipset() >= 0xe0)

NVC0LegalizePostRA::insnDominatedBy(const Instruction *later,
                                    const Instruction *early) const
   if (early->bb == later->bb)
      return early->serial < later->serial;
   return later->bb->dominatedBy(early->bb);

NVC0LegalizePostRA::addTexUse(std::list<TexUse> &uses,
                              Instruction *usei, const Instruction *texi)
   for (std::list<TexUse>::iterator it = uses.begin();
      if (insnDominatedBy(usei, it->insn)) {
      if (insnDominatedBy(it->insn, usei))
   uses.push_back(TexUse(usei, texi));
// While it might be tempting to use an algorithm that just looks at tex
// uses, not all texture results are guaranteed to be used on all paths. In
// the case where along some control flow path a texture result is never used,
// we might reuse that register for something else, creating a
// write-after-write hazard. So we have to manually look through all
// instructions looking for ones that reference the registers in question.
NVC0LegalizePostRA::findFirstUses(
      Instruction *texi, std::list<TexUse> &uses)
   int minGPR = texi->def(0).rep()->reg.data.id;
   int maxGPR = minGPR + texi->def(0).rep()->reg.size / 4 - 1;

   unordered_set<const BasicBlock *> visited;
   findFirstUsesBB(minGPR, maxGPR, texi->next, texi, uses, visited);

NVC0LegalizePostRA::findFirstUsesBB(
      int minGPR, int maxGPR, Instruction *start,
      const Instruction *texi, std::list<TexUse> &uses,
      unordered_set<const BasicBlock *> &visited)
   const BasicBlock *bb = start->bb;
   // We don't process the whole bb the first time around. This is correct,
   // however we might be in a loop and hit this BB again, and need to process
   // the full thing. So only mark a bb as visited if we processed it from the
   // beginning.
   if (start == bb->getEntry()) {
      if (visited.find(bb) != visited.end())

   for (Instruction *insn = start; insn != bb->getExit(); insn = insn->next) {
      for (int d = 0; insn->defExists(d); ++d) {
         if (insn->def(d).getFile() != FILE_GPR ||
             insn->def(d).rep()->reg.data.id < minGPR ||
             insn->def(d).rep()->reg.data.id > maxGPR)
         addTexUse(uses, insn, texi);

      for (int s = 0; insn->srcExists(s); ++s) {
         if (insn->src(s).getFile() != FILE_GPR ||
             insn->src(s).rep()->reg.data.id < minGPR ||
             insn->src(s).rep()->reg.data.id > maxGPR)
         addTexUse(uses, insn, texi);

   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      findFirstUsesBB(minGPR, maxGPR, BasicBlock::get(ei.getNode())->getEntry(),
                      texi, uses, visited);

// This pass is a bit long and ugly and can probably be optimized.
// 1. obtain a list of TEXes and their outputs' first use(s)
// 2. calculate the barrier level of each first use (minimal number of TEXes,
//    over all paths, between the TEX and the use in question)
// 3. for each barrier, if all paths from the source TEX to that barrier
//    contain a barrier of lesser level, it can be culled
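// Informally: a use that ends up with barrier level N becomes a TEXBAR with
// subOp N, which waits until at most N texture fetches issued after the TEX
// in question are still outstanding; assuming in-order completion of TEXes,
// that is enough to guarantee the result this use depends on has been written.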
NVC0LegalizePostRA::insertTextureBarriers(Function *fn)
   std::list<TexUse> *uses;
   std::vector<Instruction *> texes;
   std::vector<int> bbFirstTex;
   std::vector<int> bbFirstUse;
   std::vector<int> texCounts;
   std::vector<TexUse> useVec;

   fn->orderInstructions(insns);

   texCounts.resize(fn->allBBlocks.getSize(), 0);
   bbFirstTex.resize(fn->allBBlocks.getSize(), insns.getSize());
   bbFirstUse.resize(fn->allBBlocks.getSize(), insns.getSize());

   // tag BB CFG nodes by their id for later
   for (ArrayList::Iterator i = fn->allBBlocks.iterator(); !i.end(); i.next()) {
      BasicBlock *bb = reinterpret_cast<BasicBlock *>(i.get());
      bb->cfg.tag = bb->getId();

   // gather the first uses for each TEX
   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *tex = reinterpret_cast<Instruction *>(insns.get(i));
      if (isTextureOp(tex->op)) {
         texes.push_back(tex);
         if (!texCounts.at(tex->bb->getId()))
            bbFirstTex[tex->bb->getId()] = texes.size() - 1;
         texCounts[tex->bb->getId()]++;

   uses = new std::list<TexUse>[texes.size()];

   for (size_t i = 0; i < texes.size(); ++i) {
      findFirstUses(texes[i], uses[i]);

   // determine the barrier level at each use
   for (size_t i = 0; i < texes.size(); ++i) {
      for (std::list<TexUse>::iterator u = uses[i].begin(); u != uses[i].end();
         BasicBlock *tb = texes[i]->bb;
         BasicBlock *ub = u->insn->bb;

         for (size_t j = i + 1; j < texes.size() &&
                 texes[j]->bb == tb && texes[j]->serial < u->insn->serial;

         u->level = fn->cfg.findLightestPathWeight(&tb->cfg,
                                                   &ub->cfg, texCounts);
            WARN("Failed to find path TEX -> TEXBAR\n");

         // this counted all TEXes in the origin block, correct that
         u->level -= i - bbFirstTex.at(tb->getId()) + 1 /* this TEX */;
         // and did not count the TEXes in the destination block, add those
         for (size_t j = bbFirstTex.at(ub->getId()); j < texes.size() &&
                 texes[j]->bb == ub && texes[j]->serial < u->insn->serial;

         assert(u->level >= 0);
         useVec.push_back(*u);

   // insert the barriers
   for (size_t i = 0; i < useVec.size(); ++i) {
      Instruction *prev = useVec[i].insn->prev;
      if (useVec[i].level < 0)
      if (prev && prev->op == OP_TEXBAR) {
         if (prev->subOp > useVec[i].level)
            prev->subOp = useVec[i].level;
         prev->setSrc(prev->srcCount(), useVec[i].tex->getDef(0));
         Instruction *bar = new_Instruction(func, OP_TEXBAR, TYPE_NONE);
         bar->subOp = useVec[i].level;
         // make use explicit to ease latency calculation
         bar->setSrc(bar->srcCount(), useVec[i].tex->getDef(0));
         useVec[i].insn->bb->insertBefore(useVec[i].insn, bar);

   if (fn->getProgram()->optLevel < 3)

   std::vector<Limits> limitT, limitB, limitS; // entry, exit, single
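   // limitT/limitB are, per BB, the min/max number of TEXes that can still be
   // outstanding at block entry/exit; limitS is the same for the block taken
   // in isolation. The loops below are essentially a forward dataflow fixpoint
   // over the CFG, iterated once per loop nesting level.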
   limitT.resize(fn->allBBlocks.getSize(), Limits(0, 0));
   limitB.resize(fn->allBBlocks.getSize(), Limits(0, 0));
   limitS.resize(fn->allBBlocks.getSize());

   // cull unneeded barriers (should do that earlier, but for simplicity)
   IteratorRef bi = fn->cfg.iteratorCFG();
   // first calculate min/max outstanding TEXes for each BB
   for (bi->reset(); !bi->end(); bi->next()) {
      Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
      BasicBlock *bb = BasicBlock::get(n);
      int max = std::numeric_limits<int>::max();
      for (Instruction *i = bb->getFirst(); i; i = i->next) {
         if (isTextureOp(i->op)) {
            if (max < std::numeric_limits<int>::max())
         if (i->op == OP_TEXBAR) {
            min = MIN2(min, i->subOp);
            max = MIN2(max, i->subOp);
      // limits when looking at an isolated block
      limitS[bb->getId()].min = min;
      limitS[bb->getId()].max = max;

   // propagate the min/max values
   for (unsigned int l = 0; l <= fn->loopNestingBound; ++l) {
      for (bi->reset(); !bi->end(); bi->next()) {
         Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
         BasicBlock *bb = BasicBlock::get(n);
         const int bbId = bb->getId();
         for (Graph::EdgeIterator ei = n->incident(); !ei.end(); ei.next()) {
            BasicBlock *in = BasicBlock::get(ei.getNode());
            const int inId = in->getId();
            limitT[bbId].min = MAX2(limitT[bbId].min, limitB[inId].min);
            limitT[bbId].max = MAX2(limitT[bbId].max, limitB[inId].max);
         // I just hope this is correct ...
         if (limitS[bbId].max == std::numeric_limits<int>::max()) {
            limitB[bbId].min = limitT[bbId].min + limitS[bbId].min;
            limitB[bbId].max = limitT[bbId].max + limitS[bbId].min;
            // block contained a barrier
            limitB[bbId].min = MIN2(limitS[bbId].max,
                                    limitT[bbId].min + limitS[bbId].min);
            limitB[bbId].max = MIN2(limitS[bbId].max,
                                    limitT[bbId].max + limitS[bbId].min);

   // finally delete unnecessary barriers
   for (bi->reset(); !bi->end(); bi->next()) {
      Graph::Node *n = reinterpret_cast<Graph::Node *>(bi->get());
      BasicBlock *bb = BasicBlock::get(n);
      Instruction *prev = NULL;
      int max = limitT[bb->getId()].max;
      for (Instruction *i = bb->getFirst(); i; i = next) {
         if (i->op == OP_TEXBAR) {
            if (i->subOp >= max) {
               delete_Instruction(prog, i);
            if (prev && prev->op == OP_TEXBAR && prev->subOp >= max) {
               delete_Instruction(prog, prev);
         if (isTextureOp(i->op)) {
         if (i && !i->isNop())
NVC0LegalizePostRA::visit(Function *fn)
      insertTextureBarriers(fn);

   rZero = new_LValue(fn, FILE_GPR);
   carry = new_LValue(fn, FILE_FLAGS);

   rZero->reg.data.id = prog->getTarget()->getFileSize(FILE_GPR);
   carry->reg.data.id = 0;

NVC0LegalizePostRA::replaceZero(Instruction *i)
   for (int s = 0; i->srcExists(s); ++s) {
      if (s == 2 && i->op == OP_SUCLAMP)
      ImmediateValue *imm = i->getSrc(s)->asImm();
      if (imm && imm->reg.data.u64 == 0)

// replace CONT with BRA for single unconditional continue
NVC0LegalizePostRA::tryReplaceContWithBra(BasicBlock *bb)
   if (bb->cfg.incidentCount() != 2 || bb->getEntry()->op != OP_PRECONT)
   Graph::EdgeIterator ei = bb->cfg.incident();
   if (ei.getType() != Graph::Edge::BACK)
   if (ei.getType() != Graph::Edge::BACK)
   BasicBlock *contBB = BasicBlock::get(ei.getNode());

   if (!contBB->getExit() || contBB->getExit()->op != OP_CONT ||
       contBB->getExit()->getPredicate())
   contBB->getExit()->op = OP_BRA;
   bb->remove(bb->getEntry()); // delete PRECONT

   assert(ei.end() || ei.getType() != Graph::Edge::BACK);

// replace branches to join blocks with join ops
NVC0LegalizePostRA::propagateJoin(BasicBlock *bb)
   if (bb->getEntry()->op != OP_JOIN || bb->getEntry()->asFlow()->limit)
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      BasicBlock *in = BasicBlock::get(ei.getNode());
      Instruction *exit = in->getExit();
         in->insertTail(new FlowInstruction(func, OP_JOIN, bb));
         // there should always be a terminator instruction
         WARN("inserted missing terminator in BB:%i\n", in->getId());
      if (exit->op == OP_BRA) {
         exit->asFlow()->limit = 1; // must-not-propagate marker
   bb->remove(bb->getEntry());

NVC0LegalizePostRA::visit(BasicBlock *bb)
   Instruction *i, *next;

   // remove pseudo operations and non-fixed no-ops, split 64 bit operations
   for (i = bb->getFirst(); i; i = next) {
      if (i->op == OP_EMIT || i->op == OP_RESTART) {
         if (!i->getDef(0)->refCount())
         if (i->src(0).getFile() == FILE_IMMEDIATE)
            i->setSrc(0, rZero); // initial value must be 0

      if (i->op == OP_BAR && i->subOp == NV50_IR_SUBOP_BAR_SYNC &&
          prog->getType() != Program::TYPE_COMPUTE) {
         // It seems like barriers are never required for tessellation since
         // the warp size is 32, and there are always at most 32 tcs threads.

      // TODO: Move this to before register allocation for operations that
      // need the $c register !
      if (typeSizeof(i->dType) == 8) {
         hi = BuildUtil::split64BitOpPostRA(func, i, rZero, carry);

      if (i->op != OP_MOV && i->op != OP_PFETCH)

   if (!tryReplaceContWithBra(bb))

NVC0LoweringPass::NVC0LoweringPass(Program *prog) : targ(prog->getTarget())
   bld.setProgram(prog);

NVC0LoweringPass::visit(Function *fn)
   if (prog->getType() == Program::TYPE_GEOMETRY) {
      assert(!strncmp(fn->getName(), "MAIN", 4));
      // TODO: when we generate actual functions pass this value along somehow
      bld.setPosition(BasicBlock::get(fn->cfg.getRoot()), false);
      gpEmitAddress = bld.loadImm(NULL, 0)->asLValue();

      bld.setPosition(BasicBlock::get(fn->cfgExit)->getExit(), false);
      bld.mkMovToReg(0, gpEmitAddress);

NVC0LoweringPass::visit(BasicBlock *bb)

NVC0LoweringPass::loadTexHandle(Value *ptr, unsigned int slot)
   uint8_t b = prog->driver->io.resInfoCBSlot;
   uint32_t off = prog->driver->io.texBindBase + slot * 4;
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
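   // i.e. the handle is fetched from c[resInfoCBSlot] at offset
   // texBindBase + slot * 4, as set up by the driver above.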
// move array source to first slot, convert to u16, add indirections
NVC0LoweringPass::handleTEX(TexInstruction *i)
   const int dim = i->tex.target.getDim() + i->tex.target.isCube();
   const int arg = i->tex.target.getArgCount();
   const int lyr = arg - (i->tex.target.isMS() ? 2 : 1);
   const int chipset = prog->getTarget()->getChipset();

   // Arguments to the TEX instruction are a little insane. Even though the
   // encoding is identical between SM20 and SM30, the arguments mean
   // different things between Fermi and Kepler+. A lot of arguments are
   // optional based on flags passed to the instruction. This summarizes the
   //   - tg4: 8 bits each, either 2 (1 offset reg) or 8 (2 offset reg)
   //   - other: 4 bits each, single reg
   //  - array (+ offsets for txd in upper 16 bits)
   //  - offsets (same as fermi, except txd which takes it with array)

   if (chipset >= NVISA_GK104_CHIPSET) {
      if (i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
         // XXX this ignores tsc, and assumes a 1:1 mapping
         assert(i->tex.rIndirectSrc >= 0);
         Value *hnd = loadTexHandle(
               bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
                          i->getIndirectR(), bld.mkImm(2)),

         i->setIndirectR(hnd);
         i->setIndirectS(NULL);
      } else if (i->tex.r == i->tex.s || i->op == OP_TXF) {
         i->tex.r += prog->driver->io.texBindBase / 4;
         i->tex.s = 0; // only a single cX[] value possible here
         Value *hnd = bld.getScratch();
         Value *rHnd = loadTexHandle(NULL, i->tex.r);
         Value *sHnd = loadTexHandle(NULL, i->tex.s);

         bld.mkOp3(OP_INSBF, TYPE_U32, hnd, rHnd, bld.mkImm(0x1400), sHnd);

         i->tex.r = 0; // not used for indirect tex

         i->setIndirectR(hnd);

      if (i->tex.target.isArray()) {
         LValue *layer = new_LValue(func, FILE_GPR);
         Value *src = i->getSrc(lyr);
         const int sat = (i->op == OP_TXF) ? 1 : 0;
         DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
         bld.mkCvt(OP_CVT, TYPE_U16, layer, sTy, src)->saturate = sat;
         if (i->op != OP_TXD || chipset < NVISA_GM107_CHIPSET) {
            for (int s = dim; s >= 1; --s)
               i->setSrc(s, i->getSrc(s - 1));
            i->setSrc(dim, layer);

      // Move the indirect reference to the first place
      if (i->tex.rIndirectSrc >= 0 && (
          i->op == OP_TXD || chipset < NVISA_GM107_CHIPSET)) {
         Value *hnd = i->getIndirectR();

         i->setIndirectR(NULL);
         i->moveSources(0, 1);

         i->tex.rIndirectSrc = 0;
         i->tex.sIndirectSrc = -1;

      // (nvc0) generate and move the tsc/tic/array source to the front
      if (i->tex.target.isArray() || i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
         LValue *src = new_LValue(func, FILE_GPR); // 0xttxsaaaa

         Value *ticRel = i->getIndirectR();
         Value *tscRel = i->getIndirectS();

            i->setSrc(i->tex.rIndirectSrc, NULL);
            ticRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
                                ticRel, bld.mkImm(i->tex.r));

            i->setSrc(i->tex.sIndirectSrc, NULL);
            tscRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
                                tscRel, bld.mkImm(i->tex.s));

         Value *arrayIndex = i->tex.target.isArray() ? i->getSrc(lyr) : NULL;
         for (int s = dim; s >= 1; --s)
            i->setSrc(s, i->getSrc(s - 1));
         i->setSrc(0, arrayIndex);

            int sat = (i->op == OP_TXF) ? 1 : 0;
            DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;
            bld.mkCvt(OP_CVT, TYPE_U16, src, sTy, arrayIndex)->saturate = sat;

            bld.mkOp3(OP_INSBF, TYPE_U32, src, ticRel, bld.mkImm(0x0917), src);
            bld.mkOp3(OP_INSBF, TYPE_U32, src, tscRel, bld.mkImm(0x0710), src);

   // For nvc0, the sample id has to be in the second operand, as the offset
   // does. Right now we don't know how to pass both in, and this case can't
   // happen with OpenGL. On nve0, the sample id is part of the texture
   // coordinate argument.
   assert(chipset >= NVISA_GK104_CHIPSET ||
          !i->tex.useOffsets || !i->tex.target.isMS());

   // offset is between lod and dc
   if (i->tex.useOffsets) {
      int s = i->srcCount(0xff, true);
      if (i->op != OP_TXD || chipset < NVISA_GK104_CHIPSET) {
         if (i->tex.target.isShadow())
         if (i->srcExists(s)) // move potential predicate out of the way
            i->moveSources(s, 1);
         if (i->tex.useOffsets == 4 && i->srcExists(s + 1))
            i->moveSources(s + 1, 1);
      if (i->op == OP_TXG) {
         // Either there is 1 offset, which goes into the 2 low bytes of the
         // first source, or there are 4 offsets, which go into 2 sources (8
         // values, 1 byte each).
         Value *offs[2] = {NULL, NULL};
         for (n = 0; n < i->tex.useOffsets; n++) {
            for (c = 0; c < 2; ++c) {
               if ((n % 2) == 0 && c == 0)
                  offs[n / 2] = i->offset[n][c].get();
                  bld.mkOp3(OP_INSBF, TYPE_U32,
                            i->offset[n][c].get(),
                            bld.mkImm(0x800 | ((n * 16 + c * 8) % 32)),

         i->setSrc(s, offs[0]);
            i->setSrc(s + 1, offs[1]);

         assert(i->tex.useOffsets == 1);
         for (c = 0; c < 3; ++c) {
            if (!i->offset[0][c].getImmediate(val))
               assert(!"non-immediate offset passed to non-TXG");
            imm |= (val.reg.data.u32 & 0xf) << (c * 4);

         if (i->op == OP_TXD && chipset >= NVISA_GK104_CHIPSET) {
            // The offset goes into the upper 16 bits of the array index. So
            // create it if it's not already there, and INSBF it if it already
            // is.
            s = (i->tex.rIndirectSrc >= 0) ? 1 : 0;
            if (chipset >= NVISA_GM107_CHIPSET)
            if (i->tex.target.isArray()) {
               bld.mkOp3(OP_INSBF, TYPE_U32, i->getSrc(s),
                         bld.loadImm(NULL, imm), bld.mkImm(0xc10),
               i->moveSources(s, 1);
               i->setSrc(s, bld.loadImm(NULL, imm << 16));
            i->setSrc(s, bld.loadImm(NULL, imm));

   if (chipset >= NVISA_GK104_CHIPSET) {
      // If TEX requires more than 4 sources, the 2nd register tuple must be
      // aligned to 4, even if it consists of just a single 4-byte register.
      // XXX HACK: We insert 0 sources to avoid the 5 or 6 regs case.
      int s = i->srcCount(0xff, true);
      if (s > 4 && s < 7) {
         if (i->srcExists(s)) // move potential predicate out of the way
            i->moveSources(s, 7 - s);
            i->setSrc(s++, bld.loadImm(NULL, 0));

NVC0LoweringPass::handleManualTXD(TexInstruction *i)
   static const uint8_t qOps[4][2] =
      { QUADOP(MOV2, ADD, MOV2, ADD),   QUADOP(MOV2, MOV2, ADD, ADD) },   // l0
      { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(MOV2, MOV2, ADD, ADD) },   // l1
      { QUADOP(MOV2, ADD, MOV2, ADD),   QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l2
      { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l3
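   // Each row gives, for source lane l, the quad ops used below to accumulate
   // dPdx (column 0) and dPdy (column 1) across the four lanes of the quad;
   // see the mkQuadop() calls in the loop that follows.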
   Value *zero = bld.loadImm(bld.getSSA(), 0);
   const int dim = i->tex.target.getDim() + i->tex.target.isCube();
   const int array = i->tex.target.isArray();

   i->op = OP_TEX; // no need to clone dPdx/dPdy later

   for (c = 0; c < dim; ++c)
      crd[c] = bld.getScratch();

   bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
   for (l = 0; l < 4; ++l) {
      // mov coordinates from lane l to all lanes
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(0x00, crd[c], l, i->getSrc(c + array), zero);
      // add dPdx from lane l to lanes dx
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][0], crd[c], l, i->dPdx[c].get(), crd[c]);
      // add dPdy from lane l to lanes dy
      for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][1], crd[c], l, i->dPdy[c].get(), crd[c]);

      bld.insert(tex = cloneForward(func, i));
      for (c = 0; c < dim; ++c)
         tex->setSrc(c + array, crd[c]);

      for (c = 0; i->defExists(c); ++c) {
         def[c][l] = bld.getSSA();
         mov = bld.mkMov(def[c][l], tex->getDef(c));

   bld.mkOp(OP_QUADPOP, TYPE_NONE, NULL);

   for (c = 0; i->defExists(c); ++c) {
      Instruction *u = bld.mkOp(OP_UNION, TYPE_U32, i->getDef(c));
      for (l = 0; l < 4; ++l)
         u->setSrc(l, def[c][l]);

NVC0LoweringPass::handleTXD(TexInstruction *txd)
   int dim = txd->tex.target.getDim() + txd->tex.target.isCube();
   unsigned arg = txd->tex.target.getArgCount();
   unsigned expected_args = arg;
   const int chipset = prog->getTarget()->getChipset();

   if (chipset >= NVISA_GK104_CHIPSET) {
      if (!txd->tex.target.isArray() && txd->tex.useOffsets)
      if (txd->tex.rIndirectSrc >= 0 || txd->tex.sIndirectSrc >= 0)
      if (txd->tex.useOffsets)
      if (!txd->tex.target.isArray() && (
          txd->tex.rIndirectSrc >= 0 || txd->tex.sIndirectSrc >= 0))

   if (expected_args > 4 ||
       txd->tex.target.isShadow())

   while (txd->srcExists(arg))

   txd->tex.derivAll = true;
   if (txd->op == OP_TEX)
      return handleManualTXD(txd);

   assert(arg == expected_args);
   for (int c = 0; c < dim; ++c) {
      txd->setSrc(arg + c * 2 + 0, txd->dPdx[c]);
      txd->setSrc(arg + c * 2 + 1, txd->dPdy[c]);
      txd->dPdx[c].set(NULL);
      txd->dPdy[c].set(NULL);

NVC0LoweringPass::handleTXQ(TexInstruction *txq)
   const int chipset = prog->getTarget()->getChipset();
   if (chipset >= NVISA_GK104_CHIPSET && txq->tex.rIndirectSrc < 0)
      txq->tex.r += prog->driver->io.texBindBase / 4;

   if (txq->tex.rIndirectSrc < 0)

   Value *ticRel = txq->getIndirectR();

   txq->setIndirectS(NULL);
   txq->tex.sIndirectSrc = -1;

   if (chipset < NVISA_GK104_CHIPSET) {
      LValue *src = new_LValue(func, FILE_GPR); // 0xttxsaaaa

         txq->setSrc(txq->tex.rIndirectSrc, NULL);
         ticRel = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(),
                             ticRel, bld.mkImm(txq->tex.r));

      bld.mkOp2(OP_SHL, TYPE_U32, src, ticRel, bld.mkImm(0x17));

      txq->moveSources(0, 1);

      Value *hnd = loadTexHandle(
            bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
                       txq->getIndirectR(), bld.mkImm(2)),

      txq->setIndirectR(NULL);
      txq->moveSources(0, 1);

   txq->tex.rIndirectSrc = 0;

NVC0LoweringPass::handleTXLQ(TexInstruction *i)
   /* The outputs are inverted compared to what the TGSI instruction
    * expects. Take that into account in the mask.
    */
   assert((i->tex.mask & ~3) == 0);
   if (i->tex.mask == 1)
   else if (i->tex.mask == 2)

   bld.setPosition(i, true);

   /* The returned values are not quite what we want:
    * (a) convert from s16/u16 to f32
    * (b) multiply by 1/256
    */
   for (int def = 0; def < 2; ++def) {
      if (!i->defExists(def))
      enum DataType type = TYPE_S16;
      if (i->tex.mask == 2 || def > 0)
      bld.mkCvt(OP_CVT, TYPE_F32, i->getDef(def), type, i->getDef(def));
      bld.mkOp2(OP_MUL, TYPE_F32, i->getDef(def),
                i->getDef(def), bld.loadImm(NULL, 1.0f / 256));

   if (i->tex.mask == 3) {
      LValue *t = new_LValue(func, FILE_GPR);
      bld.mkMov(t, i->getDef(0));
      bld.mkMov(i->getDef(0), i->getDef(1));
      bld.mkMov(i->getDef(1), t);

NVC0LoweringPass::handleATOM(Instruction *atom)
   switch (atom->src(0).getFile()) {
   case FILE_MEMORY_LOCAL:
   case FILE_MEMORY_SHARED:
      assert(atom->src(0).getFile() == FILE_MEMORY_GLOBAL);

      bld.mkOp1v(OP_RDSV, TYPE_U32, bld.getScratch(), bld.mkSysVal(sv, 0));
   Value *ptr = atom->getIndirect(0, 0);

   atom->setSrc(0, cloneShallow(func, atom->getSrc(0)));
   atom->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;

      base = bld.mkOp2v(OP_ADD, TYPE_U32, base, base, ptr);
   atom->setIndirect(0, 0, base);

NVC0LoweringPass::handleCasExch(Instruction *cas, bool needCctl)
   if (cas->subOp != NV50_IR_SUBOP_ATOM_CAS &&
       cas->subOp != NV50_IR_SUBOP_ATOM_EXCH)

   bld.setPosition(cas, true);

      Instruction *cctl = bld.mkOp1(OP_CCTL, TYPE_NONE, NULL, cas->getSrc(0));
      cctl->setIndirect(0, 0, cas->getIndirect(0, 0));

      cctl->subOp = NV50_IR_SUBOP_CCTL_IV;
      if (cas->isPredicated())
         cctl->setPredicate(cas->cc, cas->getPredicate());

   if (cas->defExists(0) && cas->subOp == NV50_IR_SUBOP_ATOM_CAS) {
      // CAS is crazy. Its 2nd source is a double reg, and the 3rd source
      // should be set to the high part of the double reg or bad things will
      // happen elsewhere in the universe.
      // Also, it sometimes returns the new value instead of the old one
      // under mysterious circumstances.
      Value *dreg = bld.getSSA(8);
      bld.setPosition(cas, false);
      bld.mkOp2(OP_MERGE, TYPE_U64, dreg, cas->getSrc(1), cas->getSrc(2));
      cas->setSrc(1, dreg);

NVC0LoweringPass::loadResInfo32(Value *ptr, uint32_t off)
   uint8_t b = prog->driver->io.resInfoCBSlot;
   off += prog->driver->io.suInfoBase;
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);

NVC0LoweringPass::loadMsInfo32(Value *ptr, uint32_t off)
   uint8_t b = prog->driver->io.msInfoCBSlot;
   off += prog->driver->io.msInfoBase;
      mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
/* On nvc0, surface info is obtained via the surface binding points passed
 * to the SULD/SUST instructions.
 * On nve4, surface info is stored in c[] and is used by various special
 * instructions, e.g. for clamping coordinates or generating an address.
 * They couldn't just have added an equivalent to TIC now, could they ?
 */
#define NVE4_SU_INFO_ADDR   0x00
#define NVE4_SU_INFO_FMT    0x04
#define NVE4_SU_INFO_DIM_X  0x08
#define NVE4_SU_INFO_PITCH  0x0c
#define NVE4_SU_INFO_DIM_Y  0x10
#define NVE4_SU_INFO_ARRAY  0x14
#define NVE4_SU_INFO_DIM_Z  0x18
#define NVE4_SU_INFO_UNK1C  0x1c
#define NVE4_SU_INFO_WIDTH  0x20
#define NVE4_SU_INFO_HEIGHT 0x24
#define NVE4_SU_INFO_DEPTH  0x28
#define NVE4_SU_INFO_TARGET 0x2c
#define NVE4_SU_INFO_CALL   0x30
#define NVE4_SU_INFO_RAW_X  0x34
#define NVE4_SU_INFO_MS_X   0x38
#define NVE4_SU_INFO_MS_Y   0x3c

#define NVE4_SU_INFO__STRIDE 0x40

#define NVE4_SU_INFO_DIM(i)  (0x08 + (i) * 8)
#define NVE4_SU_INFO_SIZE(i) (0x20 + (i) * 4)
#define NVE4_SU_INFO_MS(i)   (0x38 + (i) * 4)
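// For reference, the helper macros above expand to the named offsets:
//   NVE4_SU_INFO_DIM(0..2)  -> DIM_X/DIM_Y/DIM_Z   (0x08/0x10/0x18)
//   NVE4_SU_INFO_SIZE(0..2) -> WIDTH/HEIGHT/DEPTH  (0x20/0x24/0x28)
//   NVE4_SU_INFO_MS(0..1)   -> MS_X/MS_Y           (0x38/0x3c)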
static inline uint16_t getSuClampSubOp(const TexInstruction *su, int c)
   switch (su->tex.target.getEnum()) {
   case TEX_TARGET_BUFFER:      return NV50_IR_SUBOP_SUCLAMP_PL(0, 1);
   case TEX_TARGET_RECT:        return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_1D:          return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_1D_ARRAY:    return (c == 1) ?
                                NV50_IR_SUBOP_SUCLAMP_PL(0, 2) :
                                NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_2D:          return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
   case TEX_TARGET_2D_MS:       return NV50_IR_SUBOP_SUCLAMP_BL(0, 2);
   case TEX_TARGET_2D_ARRAY:    return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_2D_MS_ARRAY: return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_3D:          return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_CUBE:        return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);
   case TEX_TARGET_CUBE_ARRAY:  return NV50_IR_SUBOP_SUCLAMP_SD(0, 2);

NVC0LoweringPass::adjustCoordinatesMS(TexInstruction *tex)
   const uint16_t base = tex->tex.r * NVE4_SU_INFO__STRIDE;
   const int arg = tex->tex.target.getArgCount();

   if (tex->tex.target == TEX_TARGET_2D_MS)
      tex->tex.target = TEX_TARGET_2D;
   if (tex->tex.target == TEX_TARGET_2D_MS_ARRAY)
      tex->tex.target = TEX_TARGET_2D_ARRAY;

   Value *x = tex->getSrc(0);
   Value *y = tex->getSrc(1);
   Value *s = tex->getSrc(arg - 1);

   Value *tx = bld.getSSA(), *ty = bld.getSSA(), *ts = bld.getSSA();

   Value *ms_x = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(0));
   Value *ms_y = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(1));

   bld.mkOp2(OP_SHL, TYPE_U32, tx, x, ms_x);
   bld.mkOp2(OP_SHL, TYPE_U32, ty, y, ms_y);

   s = bld.mkOp2v(OP_AND, TYPE_U32, ts, s, bld.loadImm(NULL, 0x7));
   s = bld.mkOp2v(OP_SHL, TYPE_U32, ts, ts, bld.mkImm(3));

   Value *dx = loadMsInfo32(ts, 0x0);
   Value *dy = loadMsInfo32(ts, 0x4);

   bld.mkOp2(OP_ADD, TYPE_U32, tx, tx, dx);
   bld.mkOp2(OP_ADD, TYPE_U32, ty, ty, dy);

   tex->moveSources(arg, -1);
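   // Net effect: x/y are shifted left by the per-surface log2 sample-grid
   // dimensions (MS_X/MS_Y) and then offset by the per-sample position read
   // from the MS info table, which is indexed with (sample id & 7) * 8 bytes.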
// Sets 64-bit "generic address", predicate and format sources for SULD/SUST.
// They're computed from the coordinates using the surface info in c[] space.
NVC0LoweringPass::processSurfaceCoordsNVE4(TexInstruction *su)
   const bool atom = su->op == OP_SUREDB || su->op == OP_SUREDP;
      su->op == OP_SULDB || su->op == OP_SUSTB || su->op == OP_SUREDB;
   const int idx = su->tex.r;
   const int dim = su->tex.target.getDim();
   const int arg = dim + (su->tex.target.isArray() ? 1 : 0);
   const uint16_t base = idx * NVE4_SU_INFO__STRIDE;

   Value *zero = bld.mkImm(0);
   Value *bf, *eau, *off;

   off = bld.getScratch(4);
   bf = bld.getScratch(4);
   addr = bld.getSSA(8);
   pred = bld.getScratch(1, FILE_PREDICATE);

   bld.setPosition(su, false);

   adjustCoordinatesMS(su);

   // calculate clamped coordinates
   for (c = 0; c < arg; ++c) {
      src[c] = bld.getScratch();
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_RAW_X);
         v = loadResInfo32(NULL, base + NVE4_SU_INFO_DIM(c));
      bld.mkOp3(OP_SUCLAMP, TYPE_S32, src[c], su->getSrc(c), v, zero)
         ->subOp = getSuClampSubOp(su, c);

   // set predicate output
   if (su->tex.target == TEX_TARGET_BUFFER) {
      src[0]->getInsn()->setFlagsDef(1, pred);
      if (su->tex.target.isArray()) {
         p1 = bld.getSSA(1, FILE_PREDICATE);
         src[dim]->getInsn()->setFlagsDef(1, p1);

   // calculate pixel offset
   if (su->tex.target != TEX_TARGET_BUFFER)
      bld.mkOp2(OP_AND, TYPE_U32, off, src[0], bld.loadImm(NULL, 0xffff));

      v = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, src[2], v, src[1])
         ->subOp = NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l

      v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, off, v, src[0])
         ->subOp = NV50_IR_SUBOP_MADSP(0,2,8); // u32 u16l u16l

      v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
      bld.mkOp3(OP_MADSP, TYPE_U32, off, src[1], v, src[0])
         ->subOp = su->tex.target.isArray() ?
         NV50_IR_SUBOP_MADSP_SD : NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l

   // calculate effective address part 1
   if (su->tex.target == TEX_TARGET_BUFFER) {
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);
      bld.mkOp3(OP_VSHL, TYPE_U32, bf, src[0], v, zero)
         ->subOp = NV50_IR_SUBOP_V1(7,6,8|2);

      if (!su->tex.target.isArray()) {
         z = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
         subOp = NV50_IR_SUBOP_SUBFM_3D;
         subOp = NV50_IR_SUBOP_SUBFM_3D;

      insn = bld.mkOp3(OP_SUBFM, TYPE_U32, bf, src[0], y, z);
      insn->subOp = subOp;
      insn->setFlagsDef(1, pred);

   v = loadResInfo32(NULL, base + NVE4_SU_INFO_ADDR);

   if (su->tex.target == TEX_TARGET_BUFFER) {
      eau = bld.mkOp3v(OP_SUEAU, TYPE_U32, bld.getScratch(4), off, bf, v);

   // add array layer offset
   if (su->tex.target.isArray()) {
      v = loadResInfo32(NULL, base + NVE4_SU_INFO_ARRAY);
         bld.mkOp3(OP_MADSP, TYPE_U32, eau, src[1], v, eau)
            ->subOp = NV50_IR_SUBOP_MADSP(4,0,0); // u16 u24 u32
         bld.mkOp3(OP_MADSP, TYPE_U32, eau, v, src[2], eau)
            ->subOp = NV50_IR_SUBOP_MADSP(0,0,0); // u32 u24 u32
      // combine predicates
         bld.mkOp2(OP_OR, TYPE_U8, pred, pred, p1);

   if (su->tex.target == TEX_TARGET_BUFFER) {
      // bf == g[] address & 0xff
      // eau == g[] address >> 8
      bld.mkOp3(OP_PERMT, TYPE_U32, bf, lo, bld.loadImm(NULL, 0x6540), eau);
      bld.mkOp3(OP_PERMT, TYPE_U32, eau, zero, bld.loadImm(NULL, 0x0007), eau);

   if (su->op == OP_SULDP && su->tex.target == TEX_TARGET_BUFFER) {
      // Convert from u32 to u8 address format, which is what the library code
      // doing SULDP currently uses.
      // XXX: can SUEAU do this ?
      // XXX: does it matter that we don't mask high bytes in bf ?
      bld.mkOp2(OP_SHR, TYPE_U32, off, bf, bld.mkImm(8));
      bld.mkOp2(OP_ADD, TYPE_U32, eau, eau, off);

   bld.mkOp2(OP_MERGE, TYPE_U64, addr, bf, eau);

   if (atom && su->tex.target == TEX_TARGET_BUFFER)
      bld.mkOp2(OP_ADD, TYPE_U64, addr, addr, off);

   // let's just set it 0 for raw access and hope it works
      bld.mkImm(0) : loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);

   // get rid of old coordinate sources, make space for fmt info and predicate
   su->moveSources(arg, 3 - arg);
   // set 64 bit address and 32-bit format sources
   su->setSrc(0, addr);
   su->setSrc(2, pred);

NVC0LoweringPass::handleSurfaceOpNVE4(TexInstruction *su)
   processSurfaceCoordsNVE4(su);

   // Who do we hate more ? The person who decided that nvc0's SULD doesn't
   // have to support conversion or the person who decided that, in OpenCL,
   // you don't have to specify the format here like you do in OpenGL ?
   if (su->op == OP_SULDP) {
      // We don't patch shaders. Ever.
      // You get an indirect call to our library blob here.
      // But at least it's uniform.
      FlowInstruction *call;

      uint16_t base = su->tex.r * NVE4_SU_INFO__STRIDE + NVE4_SU_INFO_CALL;

      for (int i = 0; i < 4; ++i)
         (r[i] = bld.getScratch(4, FILE_GPR))->reg.data.id = i;
      for (int i = 0; i < 3; ++i)
         (p[i] = bld.getScratch(1, FILE_PREDICATE))->reg.data.id = i;
      (r[4] = bld.getScratch(8, FILE_GPR))->reg.data.id = 4;

      bld.mkMov(p[1], bld.mkImm((su->cache == CACHE_CA) ? 1 : 0), TYPE_U8);
      bld.mkMov(p[2], bld.mkImm((su->cache == CACHE_CG) ? 1 : 0), TYPE_U8);
      bld.mkMov(p[0], su->getSrc(2), TYPE_U8);
      bld.mkMov(r[4], su->getSrc(0), TYPE_U64);
      bld.mkMov(r[2], su->getSrc(1), TYPE_U32);

      call = bld.mkFlow(OP_CALL, NULL, su->cc, su->getPredicate());

      call->setSrc(0, bld.mkSymbol(FILE_MEMORY_CONST,
                                   prog->driver->io.resInfoCBSlot, TYPE_U32,
                                   prog->driver->io.suInfoBase + base));
      call->setSrc(1, r[2]);
      call->setSrc(2, r[4]);
      for (int i = 0; i < 3; ++i)
         call->setSrc(3 + i, p[i]);
      for (int i = 0; i < 4; ++i) {
         call->setDef(i, r[i]);
         bld.mkMov(su->getDef(i), r[i]);
      call->setDef(4, p[1]);
      delete_Instruction(bld.getProgram(), su);

   if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
      // FIXME: for out of bounds access, destination value will be undefined !
      Value *pred = su->getSrc(2);
      CondCode cc = CC_NOT_P;
      if (su->getPredicate()) {
         pred = bld.getScratch(1, FILE_PREDICATE);
         if (cc == CC_NOT_P) {
            bld.mkOp2(OP_OR, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
            bld.mkOp2(OP_AND, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
            pred->getInsn()->src(1).mod = Modifier(NV50_IR_MOD_NOT);

      Instruction *red = bld.mkOp(OP_ATOM, su->dType, su->getDef(0));
      red->subOp = su->subOp;
         gMemBase = bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0);
      red->setSrc(0, gMemBase);
      red->setSrc(1, su->getSrc(3));
      if (su->subOp == NV50_IR_SUBOP_ATOM_CAS)
         red->setSrc(2, su->getSrc(4));
      red->setIndirect(0, 0, su->getSrc(0));
      red->setPredicate(cc, pred);
      delete_Instruction(bld.getProgram(), su);
      handleCasExch(red, true);

      su->sType = (su->tex.target == TEX_TARGET_BUFFER) ? TYPE_U32 : TYPE_U8;

NVC0LoweringPass::handleWRSV(Instruction *i)
   // must replace, $sreg are not writeable
   addr = targ->getSVAddress(FILE_SHADER_OUTPUT, i->getSrc(0)->asSym());

   sym = bld.mkSymbol(FILE_SHADER_OUTPUT, 0, i->sType, addr);

   st = bld.mkStore(OP_EXPORT, i->dType, sym, i->getIndirect(0, 0),
   st->perPatch = i->perPatch;

   bld.getBB()->remove(i);

NVC0LoweringPass::readTessCoord(LValue *dst, int c)
   Value *laneid = bld.getSSA();

   bld.mkOp1(OP_RDSV, TYPE_U32, laneid, bld.mkSysVal(SV_LANEID, 0));

      bld.mkFetch(x, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f0, NULL, laneid);
      bld.mkFetch(y, TYPE_F32, FILE_SHADER_OUTPUT, 0x2f4, NULL, laneid);

      bld.mkOp2(OP_ADD, TYPE_F32, dst, x, y);
      bld.mkOp2(OP_SUB, TYPE_F32, dst, bld.loadImm(NULL, 1.0f), dst);
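      // i.e. for the third tessellation coordinate the two fetched components
      // are combined as 1.0 - (x + y); the first two coordinates are
      // presumably read back directly in the paths not shown here.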
NVC0LoweringPass::handleRDSV(Instruction *i)
   Symbol *sym = i->getSrc(0)->asSym();
   const SVSemantic sv = sym->reg.data.sv.sv;

   uint32_t addr = targ->getSVAddress(FILE_SHADER_INPUT, sym);

   if (addr >= 0x400) {

      if (sym->reg.data.sv.index == 3) {
         // TGSI backend may use 4th component of TID,NTID,CTAID,NCTAID
         i->setSrc(0, bld.mkImm((sv == SV_NTID || sv == SV_NCTAID) ? 1 : 0));

      if (sv == SV_VERTEX_COUNT) {
         bld.setPosition(i, true);
         bld.mkOp2(OP_EXTBF, TYPE_U32, i->getDef(0), i->getDef(0), bld.mkImm(0x808));

      assert(prog->getType() == Program::TYPE_FRAGMENT);
      if (i->srcExists(1)) {
         // Pass offset through to the interpolation logic
         ld = bld.mkInterp(NV50_IR_INTERP_LINEAR | NV50_IR_INTERP_OFFSET,
                           i->getDef(0), addr, NULL);
         ld->setSrc(1, i->getSrc(1));
         bld.mkInterp(NV50_IR_INTERP_LINEAR, i->getDef(0), addr, NULL);

      Value *face = i->getDef(0);
      bld.mkInterp(NV50_IR_INTERP_FLAT, face, addr, NULL);
      if (i->dType == TYPE_F32) {
         bld.mkOp2(OP_OR, TYPE_U32, face, face, bld.mkImm(0x00000001));
         bld.mkOp1(OP_NEG, TYPE_S32, face, face);
         bld.mkCvt(OP_CVT, TYPE_F32, face, TYPE_S32, face);

      assert(prog->getType() == Program::TYPE_TESSELLATION_EVAL);
      readTessCoord(i->getDef(0)->asLValue(), i->getSrc(0)->reg.data.sv.index);

      assert(targ->getChipset() >= NVISA_GK104_CHIPSET); // mov $sreg otherwise
      if (sym->reg.data.sv.index == 3) {
         i->setSrc(0, bld.mkImm(sv == SV_GRIDID ? 0 : 1));
      addr += prog->driver->prop.cp.gridInfoBase;
      bld.mkLoad(TYPE_U32, i->getDef(0),
                 bld.mkSymbol(FILE_MEMORY_CONST, 0, TYPE_U32, addr), NULL);
   case SV_SAMPLE_INDEX:
      // TODO: Properly pass source as an address in the PIX address space
      //       (which can be of the form [r0+offset]). But this is currently
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
   case SV_SAMPLE_POS: {
      Value *off = new_LValue(func, FILE_GPR);
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
      bld.mkOp2(OP_SHL, TYPE_U32, off, i->getDef(0), bld.mkImm(3));
      bld.mkLoad(TYPE_F32,
                       FILE_MEMORY_CONST, prog->driver->io.resInfoCBSlot,
                       TYPE_U32, prog->driver->io.sampleInfoBase +
                             4 * sym->reg.data.sv.index),
   case SV_SAMPLE_MASK:
      ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
      ld->subOp = NV50_IR_SUBOP_PIXLD_COVMASK;
   case SV_BASEINSTANCE:
      ld = bld.mkLoad(TYPE_U32, i->getDef(0),
                      bld.mkSymbol(FILE_MEMORY_CONST,
                                   prog->driver->io.auxCBSlot,
                                   prog->driver->io.drawInfoBase +
                                   4 * (sv - SV_BASEVERTEX)),
      if (prog->getType() == Program::TYPE_TESSELLATION_EVAL && !i->perPatch)
         vtx = bld.mkOp1v(OP_PFETCH, TYPE_U32, bld.getSSA(), bld.mkImm(0));
      ld = bld.mkFetch(i->getDef(0), i->dType,
                       FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
      ld->perPatch = i->perPatch;

   bld.getBB()->remove(i);

NVC0LoweringPass::handleDIV(Instruction *i)
   if (!isFloatType(i->dType))

   bld.setPosition(i, false);
   Instruction *rcp = bld.mkOp1(OP_RCP, i->dType, bld.getSSA(typeSizeof(i->dType)), i->getSrc(1));

   i->setSrc(1, rcp->getDef(0));
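   // Float division is lowered by replacing the divisor with its reciprocal,
   // i.e. a / b becomes a * rcp(b); any post-RCP refinement of the result is
   // outside this excerpt.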
NVC0LoweringPass::handleMOD(Instruction *i)
   if (!isFloatType(i->dType))

   LValue *value = bld.getScratch(typeSizeof(i->dType));
   bld.mkOp1(OP_RCP, i->dType, value, i->getSrc(1));
   bld.mkOp2(OP_MUL, i->dType, value, i->getSrc(0), value);
   bld.mkOp1(OP_TRUNC, i->dType, value, value);
   bld.mkOp2(OP_MUL, i->dType, value, i->getSrc(1), value);

   i->setSrc(1, value);
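   // i.e. mod(a, b) is computed as a - b * trunc(a * rcp(b)), assuming the
   // MOD op itself is turned into the final subtract (not shown here).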
NVC0LoweringPass::handleSQRT(Instruction *i)
   Value *pred = bld.getSSA(1, FILE_PREDICATE);
   Value *zero = bld.getSSA();

   bld.mkOp1(OP_MOV, TYPE_U32, zero, bld.mkImm(0));
   if (i->dType == TYPE_F64)
      zero = bld.mkOp2v(OP_MERGE, TYPE_U64, bld.getSSA(8), zero, zero);
   bld.mkCmp(OP_SET, CC_LE, i->dType, pred, i->dType, i->getSrc(0), zero);
   bld.mkOp1(OP_MOV, i->dType, i->getDef(0), zero)->setPredicate(CC_P, pred);
   rsq = bld.mkOp1(OP_RSQ, i->dType,
                   bld.getSSA(typeSizeof(i->dType)), i->getSrc(0));
   rsq->setPredicate(CC_NOT_P, pred);

   i->setSrc(1, rsq->getDef(0));
   i->setPredicate(CC_NOT_P, pred);
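   // In other words sqrt(x) is computed as x * rsq(x); inputs with x <= 0 are
   // forced to zero through the predicate, so the RSQ (and the multiply,
   // presumably rewritten from the SQRT op) never execute for them.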
NVC0LoweringPass::handlePOW(Instruction *i)
   LValue *val = bld.getScratch();

   bld.mkOp1(OP_LG2, TYPE_F32, val, i->getSrc(0));
   bld.mkOp2(OP_MUL, TYPE_F32, val, i->getSrc(1), val)->dnz = 1;
   bld.mkOp1(OP_PREEX2, TYPE_F32, val, val);
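   // i.e. pow(a, b) = exp2(b * log2(a)); PREEX2 prepares the operand for the
   // EX2 that presumably replaces the original POW op (not shown here).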
NVC0LoweringPass::handleEXPORT(Instruction *i)
   if (prog->getType() == Program::TYPE_FRAGMENT) {
      int id = i->getSrc(0)->reg.data.offset / 4;

      if (i->src(0).isIndirect(0)) // TODO, ugly

      i->subOp = NV50_IR_SUBOP_MOV_FINAL;
      i->src(0).set(i->src(1));

      i->setDef(0, new_LValue(func, FILE_GPR));
      i->getDef(0)->reg.data.id = id;

      prog->maxGPR = MAX2(prog->maxGPR, id);

   if (prog->getType() == Program::TYPE_GEOMETRY) {
      i->setIndirect(0, 1, gpEmitAddress);

NVC0LoweringPass::handleOUT(Instruction *i)
   Instruction *prev = i->prev;
   ImmediateValue stream, prevStream;

   // Only merge if the stream ids match. Also, note that the previous
   // instruction would have already been lowered, so we take arg1 from it.
   if (i->op == OP_RESTART && prev && prev->op == OP_EMIT &&
       i->src(0).getImmediate(stream) &&
       prev->src(1).getImmediate(prevStream) &&
       stream.reg.data.u32 == prevStream.reg.data.u32) {
      i->prev->subOp = NV50_IR_SUBOP_EMIT_RESTART;
      delete_Instruction(prog, i);

      assert(gpEmitAddress);
      i->setDef(0, gpEmitAddress);
      i->setSrc(1, i->getSrc(0));
      i->setSrc(0, gpEmitAddress);

// Generate a binary predicate if an instruction is predicated by
// e.g. an f32 value.
NVC0LoweringPass::checkPredicate(Instruction *insn)
   Value *pred = insn->getPredicate();

   if (!pred || pred->reg.file == FILE_PREDICATE)

   pdst = new_LValue(func, FILE_PREDICATE);

   // CAUTION: don't use pdst->getInsn, the definition might not be unique,
   // delay turning PSET(FSET(x,y),0) into PSET(x,y) to a later pass

   bld.mkCmp(OP_SET, CC_NEU, insn->dType, pdst, insn->dType, bld.mkImm(0), pred);

   insn->setPredicate(insn->cc, pdst);
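   // e.g. an f32 predicate p is replaced by the boolean (p != 0) computed via
   // SET NEU against 0, so the predicate file only ever holds proper booleans.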
// - add quadop dance for texturing
// - put FP outputs in GPRs
// - convert instruction sequences
NVC0LoweringPass::visit(Instruction *i)
   bld.setPosition(i, false);

   if (i->cc != CC_ALWAYS)

      return handleTEX(i->asTex());
      return handleTXD(i->asTex());
      return handleTXLQ(i->asTex());
      return handleTXQ(i->asTex());
      bld.mkOp1(OP_PREEX2, TYPE_F32, i->getDef(0), i->getSrc(0));
      i->setSrc(0, i->getDef(0));
      return handlePOW(i);
      return handleDIV(i);
      return handleMOD(i);
      return handleSQRT(i);
      ret = handleEXPORT(i);
      return handleOUT(i);
      return handleRDSV(i);
      return handleWRSV(i);
      if (i->src(0).getFile() == FILE_SHADER_INPUT) {
         if (prog->getType() == Program::TYPE_COMPUTE) {
            i->getSrc(0)->reg.file = FILE_MEMORY_CONST;
            i->getSrc(0)->reg.fileIndex = 0;
         if (prog->getType() == Program::TYPE_GEOMETRY &&
             i->src(0).isIndirect(0)) {
            // XXX: this assumes vec4 units
            Value *ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
                                    i->getIndirect(0, 0), bld.mkImm(4));
            i->setIndirect(0, 0, ptr);
            assert(prog->getType() != Program::TYPE_FRAGMENT); // INTERP
      } else if (i->src(0).getFile() == FILE_MEMORY_CONST) {
         if (i->src(0).isIndirect(1)) {
            if (i->src(0).isIndirect(0))
               ptr = bld.mkOp3v(OP_INSBF, TYPE_U32, bld.getSSA(),
                                i->getIndirect(0, 1), bld.mkImm(0x1010),
                                i->getIndirect(0, 0));
               ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
                                i->getIndirect(0, 1), bld.mkImm(16));
            i->setIndirect(0, 1, NULL);
            i->setIndirect(0, 0, ptr);
            i->subOp = NV50_IR_SUBOP_LDC_IS;
      } else if (i->src(0).getFile() == FILE_SHADER_OUTPUT) {
         assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);

      const bool cctl = i->src(0).getFile() == FILE_MEMORY_GLOBAL;
      handleCasExch(i, cctl);

      if (targ->getChipset() >= NVISA_GK104_CHIPSET)
         handleSurfaceOpNVE4(i->asTex());

   /* Kepler+ has a special opcode to compute a new base address to be used
    * for indirect loads.
    */
   if (targ->getChipset() >= NVISA_GK104_CHIPSET && !i->perPatch &&
       (i->op == OP_VFETCH || i->op == OP_EXPORT) && i->src(0).isIndirect(0)) {
      Instruction *afetch = bld.mkOp1(OP_AFETCH, TYPE_U32, bld.getSSA(),
                                      cloneShallow(func, i->getSrc(0)));
      afetch->setIndirect(0, 0, i->getIndirect(0, 0));
      i->src(0).get()->reg.data.offset = 0;
      i->setIndirect(0, 0, afetch->getDef(0));

TargetNVC0::runLegalizePass(Program *prog, CGStage stage) const
   if (stage == CG_STAGE_PRE_SSA) {
      NVC0LoweringPass pass(prog);
      return pass.run(prog, false, true);
   if (stage == CG_STAGE_POST_RA) {
      NVC0LegalizePostRA pass(prog);
      return pass.run(prog, false, true);
   if (stage == CG_STAGE_SSA) {
      NVC0LegalizeSSA pass;
      return pass.run(prog, false, true);

} // namespace nv50_ir