i->ftz = true;
}
+void
+NVC0LegalizeSSA::handleTEXLOD(TexInstruction *i)
+{
+ if (i->tex.levelZero)
+ return;
+
+ ImmediateValue lod;
+
+ // The LOD argument comes right after the coordinates (before depth bias,
+ // offsets, etc.).
+ int arg = i->tex.target.getArgCount();
+
+ // SM30+ stores the indirect handle as a separate arg, which comes before
+ // the LOD.
+ if (prog->getTarget()->getChipset() >= NVISA_GK104_CHIPSET &&
+ i->tex.rIndirectSrc >= 0)
+ arg++;
+ // SM20 stores the indirect handle combined with the array coordinate.
+ if (prog->getTarget()->getChipset() < NVISA_GK104_CHIPSET &&
+ !i->tex.target.isArray() &&
+ i->tex.rIndirectSrc >= 0)
+ arg++;
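+ // E.g. for a 2D texture with an indirect handle on SM30+, the sources
+ // are laid out as (x, y, handle, lod, ...), so arg now points at the lod.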
+
+ if (!i->src(arg).getImmediate(lod) || !lod.isInteger(0))
+ return;
+
+ if (i->op == OP_TXL)
+ i->op = OP_TEX;
+ i->tex.levelZero = true;
+ i->moveSources(arg + 1, -1); // remove the now-implicit lod argument
+}
+
+void
+NVC0LegalizeSSA::handleShift(Instruction *lo)
+{
+ Value *shift = lo->getSrc(1);
+ Value *dst64 = lo->getDef(0);
+ Value *src[2], *dst[2];
+ operation op = lo->op;
+
+ bld.setPosition(lo, false);
+
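+ // split the 64-bit source into two 32-bit halves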
+ bld.mkSplit(src, 4, lo->getSrc(0));
+
+ // SM30 and prior don't have the fancy new SHF.L/R ops, so the logic has
+ // to be emulated completely. For SM35+, we can use the more direct SHF
+ // operations.
+ if (prog->getTarget()->getChipset() < NVISA_GK20A_CHIPSET) {
+ // The strategy here is to handle shifts <= 32 and shifts > 32 as
+ // separate cases.
+ //
+ // For SHL:
+ // If the shift is <= 32, then
+ // (HI,LO) << x = (HI << x | (LO >> (32 - x)), LO << x)
+ // If the shift is > 32, then
+ // (HI,LO) << x = (LO << (x - 32), 0)
+ //
+ // For SHR:
+ // If the shift is <= 32, then
+ // (HI,LO) >> x = (HI >> x, (HI << (32 - x)) | LO >> x)
+ // If the shift is > 32, then
+ // (HI,LO) >> x = (0, HI >> (x - 32))
+ //
+ // Note that on NVIDIA hardware, a shift >= 32 yields a 0 value, which
+ // we use to our advantage: it is what makes the shift == 32 boundary
+ // above work out. Also note the structural similarity between the
+ // right and left cases; the main difference is that hi/lo are swapped
+ // on input and output.
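+ //
+ // Worked SHL example with (HI,LO) = (0x00000012, 0x34567890):
+ //   x = 8:  (0x12 << 8 | 0x34567890 >> 24, 0x34567890 << 8)
+ //         = (0x00001234, 0x56789000)
+ //   x = 40: (0x34567890 << 8, 0) = (0x56789000, 0x00000000)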
+
+ Value *x32_minus_shift, *pred, *hi1, *hi2;
+ DataType type = isSignedIntType(lo->dType) ? TYPE_S32 : TYPE_U32;
+ operation antiop = op == OP_SHR ? OP_SHL : OP_SHR;
+ if (op == OP_SHR)
+ std::swap(src[0], src[1]);
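+ // x32_minus_shift = 32 - shift, via the negate modifier on src0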
+ bld.mkOp2(OP_ADD, TYPE_U32, (x32_minus_shift = bld.getSSA()), shift, bld.mkImm(0x20))
+ ->src(0).mod = Modifier(NV50_IR_MOD_NEG);
+ bld.mkCmp(OP_SET, CC_LE, TYPE_U8, (pred = bld.getSSA(1, FILE_PREDICATE)),
+ TYPE_U32, shift, bld.mkImm(32));
+ // Compute HI (shift <= 32)
+ bld.mkOp2(OP_OR, TYPE_U32, (hi1 = bld.getSSA()),
+ bld.mkOp2v(op, TYPE_U32, bld.getSSA(), src[1], shift),
+ bld.mkOp2v(antiop, TYPE_U32, bld.getSSA(), src[0], x32_minus_shift))
+ ->setPredicate(CC_P, pred);
+ // Compute LO (all shift values)
+ bld.mkOp2(op, type, (dst[0] = bld.getSSA()), src[0], shift);
+ // Compute HI (shift > 32)
+ bld.mkOp2(op, type, (hi2 = bld.getSSA()), src[1],
+ bld.mkOp1v(OP_NEG, TYPE_S32, bld.getSSA(), x32_minus_shift))
+ ->setPredicate(CC_NOT_P, pred);
+ bld.mkOp2(OP_UNION, TYPE_U32, (dst[1] = bld.getSSA()), hi1, hi2);
+ if (op == OP_SHR)
+ std::swap(dst[0], dst[1]);
+ bld.mkOp2(OP_MERGE, TYPE_U64, dst64, dst[0], dst[1]);
+ delete_Instruction(prog, lo);
+ return;
+ }
+
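+ // SM35+: emit a pair of three-source shifts, which the target can turn
+ // into SHF funnel shifts that pull bits across both 32-bit halves.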
+ Instruction *hi = new_Instruction(func, op, TYPE_U32);
+ lo->bb->insertAfter(lo, hi);
+
+ hi->sType = lo->sType;
+ lo->dType = TYPE_U32;
+
+ hi->setDef(0, (dst[1] = bld.getSSA()));
+ if (lo->op == OP_SHR)
+ hi->subOp |= NV50_IR_SUBOP_SHIFT_HIGH;
+ lo->setDef(0, (dst[0] = bld.getSSA()));
+
+ bld.setPosition(hi, true);
+
+ if (lo->op == OP_SHL)
+ std::swap(hi, lo);
+
+ hi->setSrc(0, new_ImmediateValue(prog, 0u));
+ hi->setSrc(1, shift);
+ hi->setSrc(2, lo->op == OP_SHL ? src[0] : src[1]);
+
+ lo->setSrc(0, src[0]);
+ lo->setSrc(1, shift);
+ lo->setSrc(2, src[1]);
+
+ bld.mkOp2(OP_MERGE, TYPE_U64, dst64, dst[0], dst[1]);
+}
+
+void
+NVC0LegalizeSSA::handleSET(CmpInstruction *cmp)
+{
+ DataType hTy = cmp->sType == TYPE_S64 ? TYPE_S32 : TYPE_U32;
+ Value *carry;
+ Value *src0[2], *src1[2];
+ bld.setPosition(cmp, false);
+
+ bld.mkSplit(src0, 4, cmp->getSrc(0));
+ bld.mkSplit(src1, 4, cmp->getSrc(1));
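+ // Subtract the low words to generate the carry, then do the actual
+ // comparison on the high words with that carry folded in.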
+ bld.mkOp2(OP_SUB, hTy, NULL, src0[0], src1[0])
+ ->setFlagsDef(0, (carry = bld.getSSA(1, FILE_FLAGS)));
+ cmp->setFlagsSrc(cmp->srcCount(), carry);
+ cmp->setSrc(0, src0[1]);
+ cmp->setSrc(1, src1[1]);
+ cmp->sType = hTy;
+}
+
bool
NVC0LegalizeSSA::visit(BasicBlock *bb)
{
Instruction *next;
for (Instruction *i = bb->getEntry(); i; i = next) {
next = i->next;
- if (i->sType == TYPE_F32) {
- if (prog->getType() != Program::TYPE_COMPUTE)
- handleFTZ(i);
- continue;
- }
+
+ if (i->sType == TYPE_F32 && prog->getType() != Program::TYPE_COMPUTE)
+ handleFTZ(i);
+
switch (i->op) {
case OP_DIV:
case OP_MOD:
- handleDIV(i);
+ if (i->sType != TYPE_F32)
+ handleDIV(i);
break;
case OP_RCP:
case OP_RSQ:
if (i->dType == TYPE_F64)
handleRCPRSQ(i);
break;
+ case OP_TXL:
+ case OP_TXF:
+ handleTEXLOD(i->asTex());
+ break;
+ case OP_SHR:
+ case OP_SHL:
+ if (typeSizeof(i->sType) == 8)
+ handleShift(i);
+ break;
+ case OP_SET:
+ case OP_SET_AND:
+ case OP_SET_OR:
+ case OP_SET_XOR:
+ if (typeSizeof(i->sType) == 8 && i->sType != TYPE_F64)
+ handleSET(i->asCmp());
+ break;
default:
break;
}
: rZero(NULL),
carry(NULL),
pOne(NULL),
- needTexBar(prog->getTarget()->getChipset() >= 0xe0)
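+ // GM107+ doesn't need the texbar pass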
+ needTexBar(prog->getTarget()->getChipset() >= 0xe0 &&
+ prog->getTarget()->getChipset() < 0x110)
{
}
} else {
// TODO: Move this to before register allocation for operations that
// need the $c register!
- if (typeSizeof(i->dType) == 8) {
+ if (typeSizeof(i->sType) == 8 || typeSizeof(i->dType) == 8) {
Instruction *hi;
hi = BuildUtil::split64BitOpPostRA(func, i, rZero, carry);
if (hi)
NVC0LoweringPass::NVC0LoweringPass(Program *prog) : targ(prog->getTarget())
{
bld.setProgram(prog);
- gMemBase = NULL;
}
bool
i->setIndirectR(hnd);
i->setIndirectS(NULL);
} else if (i->tex.r == i->tex.s || i->op == OP_TXF) {
- i->tex.r += prog->driver->io.texBindBase / 4;
+ if (i->tex.r == 0xffff)
+ i->tex.r = prog->driver->io.fbtexBindBase / 4;
+ else
+ i->tex.r += prog->driver->io.texBindBase / 4;
i->tex.s = 0; // only a single cX[] value possible here
} else {
Value *hnd = bld.getScratch();
i->tex.rIndirectSrc = 0;
i->tex.sIndirectSrc = -1;
}
+ // Move the indirect reference to right after the coords
+ else if (i->tex.rIndirectSrc >= 0 && chipset >= NVISA_GM107_CHIPSET) {
+ Value *hnd = i->getIndirectR();
+
+ i->setIndirectR(NULL);
+ i->moveSources(arg, 1);
+ i->setSrc(arg, hnd);
+ i->tex.rIndirectSrc = 0;
+ i->tex.sIndirectSrc = -1;
+ }
} else
// (nvc0) generate and move the tsc/tic/array source to the front
if (i->tex.target.isArray() || i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
Value *ticRel = i->getIndirectR();
Value *tscRel = i->getIndirectS();
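+ // tex.r == 0xffff is the driver's marker for the framebuffer texture;
+ // give it fixed tic/tsc slots here.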
+ if (i->tex.r == 0xffff) {
+ i->tex.r = 0x20;
+ i->tex.s = 0x10;
+ }
+
if (ticRel) {
i->setSrc(i->tex.rIndirectSrc, NULL);
if (i->tex.r)
for (n = 0; n < i->tex.useOffsets; n++) {
for (c = 0; c < 2; ++c) {
if ((n % 2) == 0 && c == 0)
- offs[n / 2] = i->offset[n][c].get();
+ bld.mkMov(offs[n / 2] = bld.getScratch(), i->offset[n][c].get());
else
bld.mkOp3(OP_INSBF, TYPE_U32,
offs[n / 2],
mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U64, off + 8), ptr);
}
-inline Value *
-NVC0LoweringPass::loadSuInfo64(Value *ptr, uint32_t off)
-{
- return loadResInfo64(ptr, off, prog->driver->io.suInfoBase);
-}
-
-inline Value *
-NVC0LoweringPass::loadSuLength32(Value *ptr, uint32_t off)
-{
- return loadResLength32(ptr, off, prog->driver->io.suInfoBase);
-}
-
-inline Value *
-NVC0LoweringPass::loadBufInfo32(Value *ptr, uint32_t off)
-{
- return loadResInfo32(ptr, off, prog->driver->io.bufInfoBase);
-}
-
inline Value *
NVC0LoweringPass::loadBufInfo64(Value *ptr, uint32_t off)
{
return loadResInfo64(ptr, off, prog->driver->io.bufInfoBase);
}
-inline Value *
-NVC0LoweringPass::loadUboInfo32(Value *ptr, uint32_t off)
-{
- return loadResInfo32(ptr, off, prog->driver->io.uboInfoBase);
-}
-
inline Value *
NVC0LoweringPass::loadUboInfo64(Value *ptr, uint32_t off)
{
bld.mkCvt(OP_CVT, TYPE_F32, typedDst[i], TYPE_F16, typedDst[i]);
}
}
+
+ if (format->bgra) {
+ std::swap(typedDst[0], typedDst[2]);
+ }
}
void
convertSurfaceFormat(su);
if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
- Value *pred = su->getSrc(2);
- CondCode cc = CC_NOT_P;
- if (su->getPredicate()) {
- pred = bld.getScratch(1, FILE_PREDICATE);
- cc = su->cc;
- if (cc == CC_NOT_P) {
- bld.mkOp2(OP_OR, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
- } else {
- bld.mkOp2(OP_AND, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
- pred->getInsn()->src(1).mod = Modifier(NV50_IR_MOD_NOT);
- }
- }
+ assert(su->getPredicate());
+ Value *pred =
+ bld.mkOp2v(OP_OR, TYPE_U8, bld.getScratch(1, FILE_PREDICATE),
+ su->getPredicate(), su->getSrc(2));
+
Instruction *red = bld.mkOp(OP_ATOM, su->dType, bld.getSSA());
red->subOp = su->subOp;
- if (!gMemBase)
- gMemBase = bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0);
- red->setSrc(0, gMemBase);
+ red->setSrc(0, bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0));
red->setSrc(1, su->getSrc(3));
if (su->subOp == NV50_IR_SUBOP_ATOM_CAS)
red->setSrc(2, su->getSrc(4));
// performed
Instruction *mov = bld.mkMov(bld.getSSA(), bld.loadImm(NULL, 0));
- assert(cc == CC_NOT_P);
- red->setPredicate(cc, pred);
+ assert(su->cc == CC_NOT_P);
+ red->setPredicate(su->cc, pred);
mov->setPredicate(CC_P, pred);
bld.mkOp2(OP_UNION, TYPE_U32, su->getDef(0),
adjustCoordinatesMS(su);
+ if (ind) {
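+ // fold the slot into the indirect index and mask it down to the 8
+ // surface slots that can be bound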
+ Value *ptr;
+ ptr = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getSSA(), ind, bld.mkImm(su->tex.r));
+ ptr = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), ptr, bld.mkImm(7));
+ su->setIndirectR(ptr);
+ }
+
// get surface coordinates
for (c = 0; c < arg; ++c)
src[c] = su->getSrc(c);
}
}
+void
+NVC0LoweringPass::processSurfaceCoordsGM107(TexInstruction *su)
+{
+ const int slot = su->tex.r;
+ const int dim = su->tex.target.getDim();
+ const int arg = dim + (su->tex.target.isArray() || su->tex.target.isCube());
+ Value *ind = su->getIndirectR();
+ int pos = 0;
+
+ bld.setPosition(su, false);
+
+ // add texture handle
+ switch (su->op) {
+ case OP_SUSTP:
+ pos = 4;
+ break;
+ case OP_SUREDP:
+ pos = (su->subOp == NV50_IR_SUBOP_ATOM_CAS) ? 2 : 1;
+ break;
+ default:
+ assert(pos == 0);
+ break;
+ }
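+ // the driver stores image handles right after its texture handles,
+ // hence the slot + 32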
+ su->setSrc(arg + pos, loadTexHandle(ind, slot + 32));
+
+ // prevent read fault when the image is not actually bound
+ CmpInstruction *pred =
+ bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
+ TYPE_U32, bld.mkImm(0),
+ loadSuInfo32(ind, slot, NVC0_SU_INFO_ADDR));
+ if (su->op != OP_SUSTP && su->tex.format) {
+ const TexInstruction::ImgFormatDesc *format = su->tex.format;
+ int blockwidth = format->bits[0] + format->bits[1] +
+ format->bits[2] + format->bits[3];
+
+ assert(format->components != 0);
+ // if a format is specified (not FMT_NONE), also check that its byte
+ // size matches the bound image's block size
+ bld.mkCmp(OP_SET_OR, CC_NE, TYPE_U32, pred->getDef(0),
+ TYPE_U32, bld.loadImm(NULL, blockwidth / 8),
+ loadSuInfo32(ind, slot, NVC0_SU_INFO_BSIZE),
+ pred->getDef(0));
+ }
+ su->setPredicate(CC_NOT_P, pred->getDef(0));
+}
+
+void
+NVC0LoweringPass::handleSurfaceOpGM107(TexInstruction *su)
+{
+ processSurfaceCoordsGM107(su);
+
+ if (su->op == OP_SULDP)
+ convertSurfaceFormat(su);
+
+ if (su->op == OP_SUREDP) {
+ Value *def = su->getDef(0);
+
+ su->op = OP_SUREDB;
+ su->setDef(0, bld.getSSA());
+
+ bld.setPosition(su, true);
+
+ // make sure to initialize dst value when the atomic operation is not
+ // performed
+ Instruction *mov = bld.mkMov(bld.getSSA(), bld.loadImm(NULL, 0));
+
+ assert(su->cc == CC_NOT_P);
+ mov->setPredicate(CC_P, su->getPredicate());
+
+ bld.mkOp2(OP_UNION, TYPE_U32, def, su->getDef(0), mov->getDef(0));
+ }
+}
+
bool
NVC0LoweringPass::handleWRSV(Instruction *i)
{
int8_t fileIndex = i->getSrc(0)->reg.fileIndex - 1;
Value *ind = i->getIndirect(0, 1);
+ if (!ind && fileIndex == -1)
+ return;
+
if (ind) {
// Clamp the UBO index when an indirect access is used to avoid
// loading information from the wrong place in the driver cb.
bld.loadImm(NULL, 12));
}
- if (i->src(0).isIndirect(1)) {
- Value *offset = bld.loadImm(NULL, i->getSrc(0)->reg.data.offset + typeSizeof(i->sType));
- Value *ptr = loadUboInfo64(ind, fileIndex * 16);
- Value *length = loadUboLength32(ind, fileIndex * 16);
- Value *pred = new_LValue(func, FILE_PREDICATE);
- if (i->src(0).isIndirect(0)) {
- bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
- bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, i->getIndirect(0, 0));
- }
- i->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
- i->setIndirect(0, 1, NULL);
- i->setIndirect(0, 0, ptr);
- bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
- i->setPredicate(CC_NOT_P, pred);
- if (i->defExists(0)) {
- bld.mkMov(i->getDef(0), bld.mkImm(0));
- }
- } else if (fileIndex >= 0) {
- Value *ptr = loadUboInfo64(ind, fileIndex * 16);
- if (i->src(0).isIndirect(0)) {
- bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
- }
- i->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
- i->setIndirect(0, 1, NULL);
- i->setIndirect(0, 0, ptr);
+ Value *offset = bld.loadImm(NULL, i->getSrc(0)->reg.data.offset + typeSizeof(i->sType));
+ Value *ptr = loadUboInfo64(ind, fileIndex * 16);
+ Value *length = loadUboLength32(ind, fileIndex * 16);
+ Value *pred = new_LValue(func, FILE_PREDICATE);
+ if (i->src(0).isIndirect(0)) {
+ bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
+ bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, i->getIndirect(0, 0));
}
+ i->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
+ i->setIndirect(0, 1, NULL);
+ i->setIndirect(0, 0, ptr);
+ bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
+ i->setPredicate(CC_NOT_P, pred);
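+ // make out-of-bounds loads return 0: predicate the load and union the
+ // result with a zero value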
+ Value *zero, *dst = i->getDef(0);
+ i->setDef(0, bld.getSSA());
+
+ bld.setPosition(i, true);
+ bld.mkMov((zero = bld.getSSA()), bld.mkImm(0))
+ ->setPredicate(CC_P, pred);
+ bld.mkOp2(OP_UNION, TYPE_U32, dst, i->getDef(0), zero);
} else if (i->src(0).isIndirect(1)) {
Value *ptr;
if (i->src(0).isIndirect(0))
default:
if (prog->getType() == Program::TYPE_TESSELLATION_EVAL && !i->perPatch)
vtx = bld.mkOp1v(OP_PFETCH, TYPE_U32, bld.getSSA(), bld.mkImm(0));
- ld = bld.mkFetch(i->getDef(0), i->dType,
- FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
- ld->perPatch = i->perPatch;
+ if (prog->getType() == Program::TYPE_FRAGMENT) {
+ bld.mkInterp(NV50_IR_INTERP_FLAT, i->getDef(0), addr, NULL);
+ } else {
+ ld = bld.mkFetch(i->getDef(0), i->dType,
+ FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
+ ld->perPatch = i->perPatch;
+ }
break;
}
bld.getBB()->remove(i);
case OP_SUSTP:
case OP_SUREDB:
case OP_SUREDP:
- if (targ->getChipset() >= NVISA_GK104_CHIPSET)
+ if (targ->getChipset() >= NVISA_GM107_CHIPSET)
+ handleSurfaceOpGM107(i->asTex());
+ else if (targ->getChipset() >= NVISA_GK104_CHIPSET)
handleSurfaceOpNVE4(i->asTex());
else
handleSurfaceOpNVC0(i->asTex());
/* Kepler+ has a special opcode to compute a new base address to be used
* for indirect loads.
+ *
+ * Maxwell+ has a similar requirement for indirect interpolation ops in
+ * fragment shaders.
*/
- if (targ->getChipset() >= NVISA_GK104_CHIPSET && !i->perPatch &&
- (i->op == OP_VFETCH || i->op == OP_EXPORT) && i->src(0).isIndirect(0)) {
+ bool doAfetch = false;
+ if (targ->getChipset() >= NVISA_GK104_CHIPSET &&
+ !i->perPatch &&
+ (i->op == OP_VFETCH || i->op == OP_EXPORT) &&
+ i->src(0).isIndirect(0)) {
+ doAfetch = true;
+ }
+ if (targ->getChipset() >= NVISA_GM107_CHIPSET &&
+ (i->op == OP_LINTERP || i->op == OP_PINTERP) &&
+ i->src(0).isIndirect(0)) {
+ doAfetch = true;
+ }
+
+ if (doAfetch) {
+ Value *addr = cloneShallow(func, i->getSrc(0));
Instruction *afetch = bld.mkOp1(OP_AFETCH, TYPE_U32, bld.getSSA(),
- cloneShallow(func, i->getSrc(0)));
+ i->getSrc(0));
afetch->setIndirect(0, 0, i->getIndirect(0, 0));
- i->src(0).get()->reg.data.offset = 0;
+ addr->reg.data.offset = 0;
+ i->setSrc(0, addr);
i->setIndirect(0, 0, afetch->getDef(0));
}