{
FlowInstruction *call;
int builtin;
- Value *def[2];
bld.setPosition(i, false);
- def[0] = bld.mkMovToReg(0, i->getSrc(0))->getDef(0);
- def[1] = bld.mkMovToReg(1, i->getSrc(1))->getDef(0);
+
+ // Generate movs to the input regs for the call we want to generate
+ for (int s = 0; i->srcExists(s); ++s) {
+ Instruction *ld = i->getSrc(s)->getInsn();
+ // check if we are moving an immediate, propagate it in that case
+ if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV) ||
+ !(ld->src(0).getFile() == FILE_IMMEDIATE))
+ bld.mkMovToReg(s, i->getSrc(s));
+ else {
+ assert(ld->getSrc(0) != NULL);
+ bld.mkMovToReg(s, ld->getSrc(0));
+ // Clear the src, to make code elimination possible here before we
+ // delete the instruction i later
+ i->setSrc(s, NULL);
+ if (ld->isDead())
+ delete_Instruction(prog, ld);
+ }
+ }
+
switch (i->dType) {
case TYPE_U32: builtin = NVC0_BUILTIN_DIV_U32; break;
case TYPE_S32: builtin = NVC0_BUILTIN_DIV_S32; break;
default:
return;
}
call = bld.mkFlow(OP_CALL, NULL, CC_ALWAYS, NULL);
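+ // The builtin leaves the quotient in $r0 and the remainder in $r1 (the
+ // registers not clobbered by the masks below), so DIV reads reg 0 and
+ // MOD reads reg 1.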
- bld.mkMov(i->getDef(0), def[(i->op == OP_DIV) ? 0 : 1]);
+ bld.mkMovFromReg(i->getDef(0), i->op == OP_DIV ? 0 : 1);
bld.mkClobber(FILE_GPR, (i->op == OP_DIV) ? 0xe : 0xd, 2);
bld.mkClobber(FILE_PREDICATE, (i->dType == TYPE_S32) ? 0xf : 0x3, 0);
i->ftz = true;
}
+void
+NVC0LegalizeSSA::handleTEXLOD(TexInstruction *i)
+{
+ if (i->tex.levelZero)
+ return;
+
+ ImmediateValue lod;
+
+ // The LOD argument comes right after the coordinates (before depth bias,
+ // offsets, etc).
+ int arg = i->tex.target.getArgCount();
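+ // e.g. a 2D TXL carries (x, y, lod), so arg == 2 points at the LOD source.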
+
+ // SM30+ stores the indirect handle as a separate arg, which comes before
+ // the LOD.
+ if (prog->getTarget()->getChipset() >= NVISA_GK104_CHIPSET &&
+ i->tex.rIndirectSrc >= 0)
+ arg++;
+ // SM20 stores indirect handle combined with array coordinate
+ if (prog->getTarget()->getChipset() < NVISA_GK104_CHIPSET &&
+ !i->tex.target.isArray() &&
+ i->tex.rIndirectSrc >= 0)
+ arg++;
+
+ if (!i->src(arg).getImmediate(lod) || !lod.isInteger(0))
+ return;
+
+ if (i->op == OP_TXL)
+ i->op = OP_TEX;
+ i->tex.levelZero = true;
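+ // drop the now-redundant LOD argument by shifting the remaining sources down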
+ i->moveSources(arg + 1, -1);
+}
+
+void
+NVC0LegalizeSSA::handleShift(Instruction *lo)
+{
+ Value *shift = lo->getSrc(1);
+ Value *dst64 = lo->getDef(0);
+ Value *src[2], *dst[2];
+ operation op = lo->op;
+
+ bld.setPosition(lo, false);
+
+ bld.mkSplit(src, 4, lo->getSrc(0));
+
+ // SM30 and prior don't have the fancy new SHF.L/R ops. So the logic has to
+ // be completely emulated. For SM35+, we can use the more directed SHF
+ // operations.
+ if (prog->getTarget()->getChipset() < NVISA_GK20A_CHIPSET) {
+ // The strategy here is to handle shifts >= 32 and less than 32 as
+ // separate parts.
+ //
+ // For SHL:
+ // If the shift is <= 32, then
+ // (HI,LO) << x = (HI << x | (LO >> (32 - x)), LO << x)
+ // If the shift is > 32, then
+ // (HI,LO) << x = (LO << (x - 32), 0)
+ //
+ // For SHR:
+ // If the shift is <= 32, then
+ // (HI,LO) >> x = (HI >> x, (HI << (32 - x)) | LO >> x)
+ // If the shift is > 32, then
+ // (HI,LO) >> x = (0, HI >> (x - 32))
+ //
+ // Note that on NVIDIA hardware, a shift > 32 yields a 0 value, which we
+ // can use to our advantage. Also note the structural similarities
+ // between the right/left cases. The main difference is swapping hi/lo
+ // on input and output.
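+ //
+ // Worked example for SHL by x = 40: the hardware computes LO << 40 == 0,
+ // and the shift > 32 path computes HI' = LO << (40 - 32) = LO << 8, giving
+ // (LO << 8, 0) as required.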
+
+ Value *x32_minus_shift, *pred, *hi1, *hi2;
+ DataType type = isSignedIntType(lo->dType) ? TYPE_S32 : TYPE_U32;
+ operation antiop = op == OP_SHR ? OP_SHL : OP_SHR;
+ if (op == OP_SHR)
+ std::swap(src[0], src[1]);
+ bld.mkOp2(OP_ADD, TYPE_U32, (x32_minus_shift = bld.getSSA()), shift, bld.mkImm(0x20))
+ ->src(0).mod = Modifier(NV50_IR_MOD_NEG);
+ bld.mkCmp(OP_SET, CC_LE, TYPE_U8, (pred = bld.getSSA(1, FILE_PREDICATE)),
+ TYPE_U32, shift, bld.mkImm(32));
+ // Compute HI (shift <= 32)
+ bld.mkOp2(OP_OR, TYPE_U32, (hi1 = bld.getSSA()),
+ bld.mkOp2v(op, TYPE_U32, bld.getSSA(), src[1], shift),
+ bld.mkOp2v(antiop, TYPE_U32, bld.getSSA(), src[0], x32_minus_shift))
+ ->setPredicate(CC_P, pred);
+ // Compute LO (all shift values)
+ bld.mkOp2(op, type, (dst[0] = bld.getSSA()), src[0], shift);
+ // Compute HI (shift > 32)
+ bld.mkOp2(op, type, (hi2 = bld.getSSA()), src[0],
+ bld.mkOp1v(OP_NEG, TYPE_S32, bld.getSSA(), x32_minus_shift))
+ ->setPredicate(CC_NOT_P, pred);
+ bld.mkOp2(OP_UNION, TYPE_U32, (dst[1] = bld.getSSA()), hi1, hi2);
+ if (op == OP_SHR)
+ std::swap(dst[0], dst[1]);
+ bld.mkOp2(OP_MERGE, TYPE_U64, dst64, dst[0], dst[1]);
+ delete_Instruction(prog, lo);
+ return;
+ }
+
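+ // On SM35+, both halves can be computed with SHF-style funnel shifts: each
+ // op reads both 32-bit inputs and the hardware fills in the bits shifted
+ // across the word boundary, so no predication is needed.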
+ Instruction *hi = new_Instruction(func, op, TYPE_U32);
+ lo->bb->insertAfter(lo, hi);
+
+ hi->sType = lo->sType;
+ lo->dType = TYPE_U32;
+
+ hi->setDef(0, (dst[1] = bld.getSSA()));
+ if (lo->op == OP_SHR)
+ hi->subOp |= NV50_IR_SUBOP_SHIFT_HIGH;
+ lo->setDef(0, (dst[0] = bld.getSSA()));
+
+ bld.setPosition(hi, true);
+
+ if (lo->op == OP_SHL)
+ std::swap(hi, lo);
+
+ hi->setSrc(0, new_ImmediateValue(prog, 0u));
+ hi->setSrc(1, shift);
+ hi->setSrc(2, lo->op == OP_SHL ? src[0] : src[1]);
+
+ lo->setSrc(0, src[0]);
+ lo->setSrc(1, shift);
+ lo->setSrc(2, src[1]);
+
+ bld.mkOp2(OP_MERGE, TYPE_U64, dst64, dst[0], dst[1]);
+}
+
+void
+NVC0LegalizeSSA::handleSET(CmpInstruction *cmp)
+{
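+ // Split the 64-bit compare in two: a subtract of the low words that only
+ // sets the carry flag, followed by the actual compare on the high words
+ // with that carry folded in.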
+ DataType hTy = cmp->sType == TYPE_S64 ? TYPE_S32 : TYPE_U32;
+ Value *carry;
+ Value *src0[2], *src1[2];
+ bld.setPosition(cmp, false);
+
+ bld.mkSplit(src0, 4, cmp->getSrc(0));
+ bld.mkSplit(src1, 4, cmp->getSrc(1));
+ bld.mkOp2(OP_SUB, hTy, NULL, src0[0], src1[0])
+ ->setFlagsDef(0, (carry = bld.getSSA(1, FILE_FLAGS)));
+ cmp->setFlagsSrc(cmp->srcCount(), carry);
+ cmp->setSrc(0, src0[1]);
+ cmp->setSrc(1, src1[1]);
+ cmp->sType = hTy;
+}
+
bool
NVC0LegalizeSSA::visit(BasicBlock *bb)
{
Instruction *next;
for (Instruction *i = bb->getEntry(); i; i = next) {
next = i->next;
- if (i->sType == TYPE_F32) {
- if (prog->getType() != Program::TYPE_COMPUTE)
- handleFTZ(i);
- continue;
- }
+
+ if (i->sType == TYPE_F32 && prog->getType() != Program::TYPE_COMPUTE)
+ handleFTZ(i);
+
switch (i->op) {
case OP_DIV:
case OP_MOD:
- handleDIV(i);
+ if (i->sType != TYPE_F32)
+ handleDIV(i);
break;
case OP_RCP:
case OP_RSQ:
if (i->dType == TYPE_F64)
handleRCPRSQ(i);
break;
+ case OP_TXL:
+ case OP_TXF:
+ handleTEXLOD(i->asTex());
+ break;
+ case OP_SHR:
+ case OP_SHL:
+ if (typeSizeof(i->sType) == 8)
+ handleShift(i);
+ break;
+ case OP_SET:
+ case OP_SET_AND:
+ case OP_SET_OR:
+ case OP_SET_XOR:
+ if (typeSizeof(i->sType) == 8 && i->sType != TYPE_F64)
+ handleSET(i->asCmp());
+ break;
default:
break;
}
NVC0LegalizePostRA::NVC0LegalizePostRA(const Program *prog)
: rZero(NULL),
carry(NULL),
- needTexBar(prog->getTarget()->getChipset() >= 0xe0)
+ pOne(NULL),
+ needTexBar(prog->getTarget()->getChipset() >= 0xe0 &&
+ prog->getTarget()->getChipset() < 0x110)
{
}
Instruction *usei, const Instruction *texi)
{
bool add = true;
- for (std::list<TexUse>::iterator it = uses.begin();
- it != uses.end();) {
- if (insnDominatedBy(usei, it->insn)) {
- add = false;
- break;
- }
- if (insnDominatedBy(it->insn, usei))
- it = uses.erase(it);
- else
+ bool dominated = insnDominatedBy(usei, texi);
+ // Uses before the tex have to all be included. Just because an earlier
+ // instruction dominates another instruction doesn't mean that there's no
+ // way to get from the tex to the later instruction. For example you could
+ // have nested loops, with the tex in the inner loop, and uses before it in
+ // both loops - even though the outer loop's instruction would dominate the
+ // inner's, we still want a texbar before the inner loop's instruction.
+ //
+ // However we can still use the eliding logic between uses dominated by the
+ // tex instruction, as that is unambiguously correct.
+ if (dominated) {
+ for (std::list<TexUse>::iterator it = uses.begin(); it != uses.end();) {
+ if (it->after) {
+ if (insnDominatedBy(usei, it->insn)) {
+ add = false;
+ break;
+ }
+ if (insnDominatedBy(it->insn, usei)) {
+ it = uses.erase(it);
+ continue;
+ }
+ }
++it;
+ }
}
if (add)
- uses.push_back(TexUse(usei, texi));
+ uses.push_back(TexUse(usei, texi, dominated));
}
// While it might be tempting to use an algorithm that just looks at tex
continue;
for (int d = 0; insn->defExists(d); ++d) {
+ const Value *def = insn->def(d).rep();
if (insn->def(d).getFile() != FILE_GPR ||
- insn->def(d).rep()->reg.data.id < minGPR ||
- insn->def(d).rep()->reg.data.id > maxGPR)
+ def->reg.data.id + def->reg.size / 4 - 1 < minGPR ||
+ def->reg.data.id > maxGPR)
continue;
addTexUse(uses, insn, texi);
return;
}
for (int s = 0; insn->srcExists(s); ++s) {
+ const Value *src = insn->src(s).rep();
if (insn->src(s).getFile() != FILE_GPR ||
- insn->src(s).rep()->reg.data.id < minGPR ||
- insn->src(s).rep()->reg.data.id > maxGPR)
+ src->reg.data.id + src->reg.size / 4 - 1 < minGPR ||
+ src->reg.data.id > maxGPR)
continue;
addTexUse(uses, insn, texi);
return;
insertTextureBarriers(fn);
rZero = new_LValue(fn, FILE_GPR);
+ pOne = new_LValue(fn, FILE_PREDICATE);
carry = new_LValue(fn, FILE_FLAGS);
- rZero->reg.data.id = prog->getTarget()->getFileSize(FILE_GPR);
+ rZero->reg.data.id = (prog->getTarget()->getChipset() >= NVISA_GK20A_CHIPSET) ? 255 : 63;
carry->reg.data.id = 0;
+ pOne->reg.data.id = 7;
return true;
}
for (int s = 0; i->srcExists(s); ++s) {
if (s == 2 && i->op == OP_SUCLAMP)
continue;
+ if (s == 1 && i->op == OP_SHLADD)
+ continue;
ImmediateValue *imm = i->getSrc(s)->asImm();
- if (imm && imm->reg.data.u64 == 0)
- i->setSrc(s, rZero);
+ if (imm) {
+ if (i->op == OP_SELP && s == 2) {
+ i->setSrc(s, pOne);
+ if (imm->reg.data.u64 == 0)
+ i->src(s).mod = i->src(s).mod ^ Modifier(NV50_IR_MOD_NOT);
+ } else if (imm->reg.data.u64 == 0) {
+ i->setSrc(s, rZero);
+ }
+ }
}
}
} else {
// TODO: Move this to before register allocation for operations that
// need the $c register !
- if (typeSizeof(i->dType) == 8) {
+ if (typeSizeof(i->sType) == 8 || typeSizeof(i->dType) == 8) {
Instruction *hi;
hi = BuildUtil::split64BitOpPostRA(func, i, rZero, carry);
if (hi)
NVC0LoweringPass::NVC0LoweringPass(Program *prog) : targ(prog->getTarget())
{
bld.setProgram(prog);
- gMemBase = NULL;
}
bool
inline Value *
NVC0LoweringPass::loadTexHandle(Value *ptr, unsigned int slot)
{
- uint8_t b = prog->driver->io.resInfoCBSlot;
+ uint8_t b = prog->driver->io.auxCBSlot;
uint32_t off = prog->driver->io.texBindBase + slot * 4;
+
+ if (ptr)
+ ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(), ptr, bld.mkImm(2));
+
return bld.
mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
}
const int lyr = arg - (i->tex.target.isMS() ? 2 : 1);
const int chipset = prog->getTarget()->getChipset();
+ /* Only normalize in the non-explicit derivatives case. For explicit
+ * derivatives, this is handled in handleManualTXD.
+ */
+ if (i->tex.target.isCube() && i->dPdx[0].get() == NULL) {
+ Value *src[3], *val;
+ int c;
+ for (c = 0; c < 3; ++c)
+ src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), i->getSrc(c));
+ val = bld.getScratch();
+ bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
+ bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
+ bld.mkOp1(OP_RCP, TYPE_F32, val, val);
+ for (c = 0; c < 3; ++c) {
+ i->setSrc(c, bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(),
+ i->getSrc(c), val));
+ }
+ }
+
// Arguments to the TEX instruction are a little insane. Even though the
// encoding is identical between SM20 and SM30, the arguments mean
// different things between Fermi and Kepler+. A lot of arguments are
if (i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
// XXX this ignores tsc, and assumes a 1:1 mapping
assert(i->tex.rIndirectSrc >= 0);
- Value *hnd = loadTexHandle(
- bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
- i->getIndirectR(), bld.mkImm(2)),
- i->tex.r);
- i->tex.r = 0xff;
- i->tex.s = 0x1f;
- i->setIndirectR(hnd);
+ if (!i->tex.bindless) {
+ Value *hnd = loadTexHandle(i->getIndirectR(), i->tex.r);
+ i->tex.r = 0xff;
+ i->tex.s = 0x1f;
+ i->setIndirectR(hnd);
+ }
i->setIndirectS(NULL);
} else if (i->tex.r == i->tex.s || i->op == OP_TXF) {
- i->tex.r += prog->driver->io.texBindBase / 4;
+ if (i->tex.r == 0xffff)
+ i->tex.r = prog->driver->io.fbtexBindBase / 4;
+ else
+ i->tex.r += prog->driver->io.texBindBase / 4;
i->tex.s = 0; // only a single cX[] value possible here
} else {
Value *hnd = bld.getScratch();
i->tex.rIndirectSrc = 0;
i->tex.sIndirectSrc = -1;
}
+ // Move the indirect reference to right after the coords
+ else if (i->tex.rIndirectSrc >= 0 && chipset >= NVISA_GM107_CHIPSET) {
+ Value *hnd = i->getIndirectR();
+
+ i->setIndirectR(NULL);
+ i->moveSources(arg, 1);
+ i->setSrc(arg, hnd);
+ i->tex.rIndirectSrc = 0;
+ i->tex.sIndirectSrc = -1;
+ }
} else
// (nvc0) generate and move the tsc/tic/array source to the front
if (i->tex.target.isArray() || i->tex.rIndirectSrc >= 0 || i->tex.sIndirectSrc >= 0) {
Value *ticRel = i->getIndirectR();
Value *tscRel = i->getIndirectS();
+ if (i->tex.r == 0xffff) {
+ i->tex.r = 0x20;
+ i->tex.s = 0x10;
+ }
+
if (ticRel) {
i->setSrc(i->tex.rIndirectSrc, NULL);
if (i->tex.r)
}
Value *arrayIndex = i->tex.target.isArray() ? i->getSrc(lyr) : NULL;
- for (int s = dim; s >= 1; --s)
- i->setSrc(s, i->getSrc(s - 1));
- i->setSrc(0, arrayIndex);
+ if (arrayIndex) {
+ for (int s = dim; s >= 1; --s)
+ i->setSrc(s, i->getSrc(s - 1));
+ i->setSrc(0, arrayIndex);
+ } else {
+ i->moveSources(0, 1);
+ }
if (arrayIndex) {
int sat = (i->op == OP_TXF) ? 1 : 0;
for (n = 0; n < i->tex.useOffsets; n++) {
for (c = 0; c < 2; ++c) {
if ((n % 2) == 0 && c == 0)
- offs[n / 2] = i->offset[n][c].get();
+ bld.mkMov(offs[n / 2] = bld.getScratch(), i->offset[n][c].get());
else
bld.mkOp3(OP_INSBF, TYPE_U32,
offs[n / 2],
if (chipset >= NVISA_GM107_CHIPSET)
s += dim;
if (i->tex.target.isArray()) {
- bld.mkOp3(OP_INSBF, TYPE_U32, i->getSrc(s),
+ Value *offset = bld.getScratch();
+ bld.mkOp3(OP_INSBF, TYPE_U32, offset,
bld.loadImm(NULL, imm), bld.mkImm(0xc10),
i->getSrc(s));
+ i->setSrc(s, offset);
} else {
i->moveSources(s, 1);
i->setSrc(s, bld.loadImm(NULL, imm << 16));
bool
NVC0LoweringPass::handleManualTXD(TexInstruction *i)
{
- static const uint8_t qOps[4][2] =
- {
- { QUADOP(MOV2, ADD, MOV2, ADD), QUADOP(MOV2, MOV2, ADD, ADD) }, // l0
- { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(MOV2, MOV2, ADD, ADD) }, // l1
- { QUADOP(MOV2, ADD, MOV2, ADD), QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l2
- { QUADOP(SUBR, MOV2, SUBR, MOV2), QUADOP(SUBR, SUBR, MOV2, MOV2) }, // l3
- };
+ // Always done from the l0 perspective. This is the way that NVIDIA's
+ // driver does it, and doing it from the "current" lane's perspective
+ // doesn't seem to always work for reasons that aren't altogether clear,
+ // even in frag shaders.
+ //
+ // Note that we must move not only the coordinates into lane0, but also all
+ // ancillary arguments, like array indices and depth compare as they may
+ // differ between lanes. Offsets for TXD are supposed to be uniform, so we
+ // leave them alone.
+ static const uint8_t qOps[2] =
+ { QUADOP(MOV2, ADD, MOV2, ADD), QUADOP(MOV2, MOV2, ADD, ADD) };
+
Value *def[4][4];
- Value *crd[3];
+ Value *crd[3], *arr[2], *shadow;
Instruction *tex;
Value *zero = bld.loadImm(bld.getSSA(), 0);
int l, c;
const int dim = i->tex.target.getDim() + i->tex.target.isCube();
- const int array = i->tex.target.isArray();
+
+ // This function is invoked after handleTEX lowering, so we have to expect
+ // the arguments in the order that the hw wants them. For Fermi, array and
+ // indirect are both in the leading arg, while for Kepler, array and
+ // indirect are separate (and both precede the coordinates). Maxwell is
+ // handled in a separate function.
+ int array;
+ if (targ->getChipset() < NVISA_GK104_CHIPSET)
+ array = i->tex.target.isArray() || i->tex.rIndirectSrc >= 0;
+ else
+ array = i->tex.target.isArray() + (i->tex.rIndirectSrc >= 0);
i->op = OP_TEX; // no need to clone dPdx/dPdy later
for (c = 0; c < dim; ++c)
crd[c] = bld.getScratch();
+ for (c = 0; c < array; ++c)
+ arr[c] = bld.getScratch();
+ shadow = bld.getScratch();
- bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
for (l = 0; l < 4; ++l) {
- // mov coordinates from lane l to all lanes
+ Value *src[3], *val;
+
+ bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
+ // we're using the texture result from lane 0 in all cases, so make sure
+ // that lane 0 is pointing at the proper array index, indirect value,
+ // and depth compare.
+ if (l != 0) {
+ for (c = 0; c < array; ++c)
+ bld.mkQuadop(0x00, arr[c], l, i->getSrc(c), zero);
+ if (i->tex.target.isShadow()) {
+ // The next argument after coords is the depth compare
+ bld.mkQuadop(0x00, shadow, l, i->getSrc(array + dim), zero);
+ }
+ }
+ // mov position coordinates from lane l to all lanes
for (c = 0; c < dim; ++c)
bld.mkQuadop(0x00, crd[c], l, i->getSrc(c + array), zero);
// add dPdx from lane l to lanes dx
for (c = 0; c < dim; ++c)
- bld.mkQuadop(qOps[l][0], crd[c], l, i->dPdx[c].get(), crd[c]);
+ bld.mkQuadop(qOps[0], crd[c], l, i->dPdx[c].get(), crd[c]);
// add dPdy from lane l to lanes dy
for (c = 0; c < dim; ++c)
- bld.mkQuadop(qOps[l][1], crd[c], l, i->dPdy[c].get(), crd[c]);
+ bld.mkQuadop(qOps[1], crd[c], l, i->dPdy[c].get(), crd[c]);
+ // normalize cube coordinates
+ if (i->tex.target.isCube()) {
+ for (c = 0; c < 3; ++c)
+ src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), crd[c]);
+ val = bld.getScratch();
+ bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
+ bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
+ bld.mkOp1(OP_RCP, TYPE_F32, val, val);
+ for (c = 0; c < 3; ++c)
+ src[c] = bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(), crd[c], val);
+ } else {
+ for (c = 0; c < dim; ++c)
+ src[c] = crd[c];
+ }
// texture
bld.insert(tex = cloneForward(func, i));
+ if (l != 0) {
+ for (c = 0; c < array; ++c)
+ tex->setSrc(c, arr[c]);
+ if (i->tex.target.isShadow())
+ tex->setSrc(array + dim, shadow);
+ }
for (c = 0; c < dim; ++c)
- tex->setSrc(c + array, crd[c]);
+ tex->setSrc(c + array, src[c]);
+ // broadcast results from lane 0 to all lanes so that the moves *into*
+ // the target lane pick up the proper value.
+ if (l != 0)
+ for (c = 0; i->defExists(c); ++c)
+ bld.mkQuadop(0x00, tex->getDef(c), 0, tex->getDef(c), zero);
+ bld.mkOp(OP_QUADPOP, TYPE_NONE, NULL);
+
// save results
for (c = 0; i->defExists(c); ++c) {
Instruction *mov;
mov->lanes = 1 << l;
}
}
- bld.mkOp(OP_QUADPOP, TYPE_NONE, NULL);
for (c = 0; i->defExists(c); ++c) {
Instruction *u = bld.mkOp(OP_UNION, TYPE_U32, i->getDef(c));
txd->dPdx[c].set(NULL);
txd->dPdy[c].set(NULL);
}
+
+ // In this case we have fewer than 4 "real" arguments, which means that
+ // handleTEX didn't apply any padding. However we have to make sure that
+ // the second "group" of arguments still gets padded up to 4.
+ if (chipset >= NVISA_GK104_CHIPSET) {
+ int s = arg + 2 * dim;
+ if (s >= 4 && s < 7) {
+ if (txd->srcExists(s)) // move potential predicate out of the way
+ txd->moveSources(s, 7 - s);
+ while (s < 7)
+ txd->setSrc(s++, bld.loadImm(NULL, 0));
+ }
+ }
+
return true;
}
txq->moveSources(0, 1);
txq->setSrc(0, src);
} else {
- Value *hnd = loadTexHandle(
- bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
- txq->getIndirectR(), bld.mkImm(2)),
- txq->tex.r);
+ Value *hnd = loadTexHandle(txq->getIndirectR(), txq->tex.r);
txq->tex.r = 0xff;
txq->tex.s = 0x1f;
}
bool
-NVC0LoweringPass::handleSUQ(Instruction *suq)
+NVC0LoweringPass::handleBUFQ(Instruction *bufq)
{
- suq->op = OP_MOV;
- suq->setSrc(0, loadResLength32(suq->getIndirect(0, 1),
- suq->getSrc(0)->reg.fileIndex * 16));
- suq->setIndirect(0, 0, NULL);
- suq->setIndirect(0, 1, NULL);
+ bufq->op = OP_MOV;
+ bufq->setSrc(0, loadBufLength32(bufq->getIndirect(0, 1),
+ bufq->getSrc(0)->reg.fileIndex * 16));
+ bufq->setIndirect(0, 0, NULL);
+ bufq->setIndirect(0, 1, NULL);
return true;
}
+void
+NVC0LoweringPass::handleSharedATOMNVE4(Instruction *atom)
+{
+ assert(atom->src(0).getFile() == FILE_MEMORY_SHARED);
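+ // Emulate the shared atomic with a load-locked / store-unlocked retry
+ // loop: tryLockBB issues the locked load, setAndUnlockBB computes the new
+ // value and stores it with the unlock, and failLockBB branches back to
+ // retry until the unlocked store succeeds.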
+
+ BasicBlock *currBB = atom->bb;
+ BasicBlock *tryLockBB = atom->bb->splitBefore(atom, false);
+ BasicBlock *joinBB = atom->bb->splitAfter(atom);
+ BasicBlock *setAndUnlockBB = new BasicBlock(func);
+ BasicBlock *failLockBB = new BasicBlock(func);
+
+ bld.setPosition(currBB, true);
+ assert(!currBB->joinAt);
+ currBB->joinAt = bld.mkFlow(OP_JOINAT, joinBB, CC_ALWAYS, NULL);
+
+ CmpInstruction *pred =
+ bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
+ TYPE_U32, bld.mkImm(0), bld.mkImm(1));
+
+ bld.mkFlow(OP_BRA, tryLockBB, CC_ALWAYS, NULL);
+ currBB->cfg.attach(&tryLockBB->cfg, Graph::Edge::TREE);
+
+ bld.setPosition(tryLockBB, true);
+
+ Instruction *ld =
+ bld.mkLoad(TYPE_U32, atom->getDef(0), atom->getSrc(0)->asSym(),
+ atom->getIndirect(0, 0));
+ ld->setDef(1, bld.getSSA(1, FILE_PREDICATE));
+ ld->subOp = NV50_IR_SUBOP_LOAD_LOCKED;
+
+ bld.mkFlow(OP_BRA, setAndUnlockBB, CC_P, ld->getDef(1));
+ bld.mkFlow(OP_BRA, failLockBB, CC_ALWAYS, NULL);
+ tryLockBB->cfg.attach(&failLockBB->cfg, Graph::Edge::CROSS);
+ tryLockBB->cfg.attach(&setAndUnlockBB->cfg, Graph::Edge::TREE);
+
+ tryLockBB->cfg.detach(&joinBB->cfg);
+ bld.remove(atom);
+
+ bld.setPosition(setAndUnlockBB, true);
+ Value *stVal;
+ if (atom->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
+ // Read the old value, and write the new one.
+ stVal = atom->getSrc(1);
+ } else if (atom->subOp == NV50_IR_SUBOP_ATOM_CAS) {
+ CmpInstruction *set =
+ bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(),
+ TYPE_U32, ld->getDef(0), atom->getSrc(1));
+
+ bld.mkCmp(OP_SLCT, CC_NE, TYPE_U32, (stVal = bld.getSSA()),
+ TYPE_U32, atom->getSrc(2), ld->getDef(0), set->getDef(0));
+ } else {
+ operation op;
+
+ switch (atom->subOp) {
+ case NV50_IR_SUBOP_ATOM_ADD:
+ op = OP_ADD;
+ break;
+ case NV50_IR_SUBOP_ATOM_AND:
+ op = OP_AND;
+ break;
+ case NV50_IR_SUBOP_ATOM_OR:
+ op = OP_OR;
+ break;
+ case NV50_IR_SUBOP_ATOM_XOR:
+ op = OP_XOR;
+ break;
+ case NV50_IR_SUBOP_ATOM_MIN:
+ op = OP_MIN;
+ break;
+ case NV50_IR_SUBOP_ATOM_MAX:
+ op = OP_MAX;
+ break;
+ default:
+ assert(0);
+ return;
+ }
+
+ stVal = bld.mkOp2v(op, atom->dType, bld.getSSA(), ld->getDef(0),
+ atom->getSrc(1));
+ }
+
+ Instruction *st =
+ bld.mkStore(OP_STORE, TYPE_U32, atom->getSrc(0)->asSym(),
+ atom->getIndirect(0, 0), stVal);
+ st->setDef(0, pred->getDef(0));
+ st->subOp = NV50_IR_SUBOP_STORE_UNLOCKED;
+
+ bld.mkFlow(OP_BRA, failLockBB, CC_ALWAYS, NULL);
+ setAndUnlockBB->cfg.attach(&failLockBB->cfg, Graph::Edge::TREE);
+
+ // Loop until the lock is acquired and the unlocked store has succeeded.
+ bld.setPosition(failLockBB, true);
+ bld.mkFlow(OP_BRA, tryLockBB, CC_NOT_P, pred->getDef(0));
+ bld.mkFlow(OP_BRA, joinBB, CC_ALWAYS, NULL);
+ failLockBB->cfg.attach(&tryLockBB->cfg, Graph::Edge::BACK);
+ failLockBB->cfg.attach(&joinBB->cfg, Graph::Edge::TREE);
+
+ bld.setPosition(joinBB, false);
+ bld.mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
+}
+
void
NVC0LoweringPass::handleSharedATOM(Instruction *atom)
{
bld.setPosition(tryLockAndSetBB, true);
Instruction *ld =
- bld.mkLoad(TYPE_U32, atom->getDef(0),
- bld.mkSymbol(FILE_MEMORY_SHARED, 0, TYPE_U32, 0), NULL);
+ bld.mkLoad(TYPE_U32, atom->getDef(0), atom->getSrc(0)->asSym(),
+ atom->getIndirect(0, 0));
ld->setDef(1, bld.getSSA(1, FILE_PREDICATE));
ld->subOp = NV50_IR_SUBOP_LOAD_LOCKED;
break;
default:
assert(0);
+ return;
}
Instruction *i =
}
Instruction *st =
- bld.mkStore(OP_STORE, TYPE_U32,
- bld.mkSymbol(FILE_MEMORY_SHARED, 0, TYPE_U32, 0),
- NULL, stVal);
+ bld.mkStore(OP_STORE, TYPE_U32, atom->getSrc(0)->asSym(),
+ atom->getIndirect(0, 0), stVal);
st->setPredicate(CC_P, ld->getDef(1));
st->subOp = NV50_IR_SUBOP_STORE_UNLOCKED;
sv = SV_LBASE;
break;
case FILE_MEMORY_SHARED:
- handleSharedATOM(atom);
+ // For Fermi/Kepler, we have to use ld lock/st unlock to perform atomic
+ // operations on shared memory. For Maxwell, ATOMS is enough.
+ if (targ->getChipset() < NVISA_GK104_CHIPSET)
+ handleSharedATOM(atom);
+ else if (targ->getChipset() < NVISA_GM107_CHIPSET)
+ handleSharedATOMNVE4(atom);
return true;
default:
- assert(atom->src(0).getFile() == FILE_MEMORY_GLOBAL);
- base = loadResInfo64(ind, atom->getSrc(0)->reg.fileIndex * 16);
+ assert(atom->src(0).getFile() == FILE_MEMORY_BUFFER);
+ base = loadBufInfo64(ind, atom->getSrc(0)->reg.fileIndex * 16);
assert(base->reg.size == 8);
if (ptr)
base = bld.mkOp2v(OP_ADD, TYPE_U64, base, base, ptr);
assert(base->reg.size == 8);
atom->setIndirect(0, 0, base);
+ atom->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
+
+ // Harden against out-of-bounds accesses
+ Value *offset = bld.loadImm(NULL, atom->getSrc(0)->reg.data.offset + typeSizeof(atom->sType));
+ Value *length = loadBufLength32(ind, atom->getSrc(0)->reg.fileIndex * 16);
+ Value *pred = new_LValue(func, FILE_PREDICATE);
+ if (ptr)
+ bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, ptr);
+ bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
+ atom->setPredicate(CC_NOT_P, pred);
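+ // A predicated-off atomic would leave its def undefined, so union it with
+ // a 0 that is written when the bounds check fails.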
+ if (atom->defExists(0)) {
+ Value *zero, *dst = atom->getDef(0);
+ atom->setDef(0, bld.getSSA());
+
+ bld.setPosition(atom, true);
+ bld.mkMov((zero = bld.getSSA()), bld.mkImm(0))
+ ->setPredicate(CC_P, pred);
+ bld.mkOp2(OP_UNION, TYPE_U32, dst, atom->getDef(0), zero);
+ }
+
return true;
}
base =
bool
NVC0LoweringPass::handleCasExch(Instruction *cas, bool needCctl)
{
- if (cas->src(0).getFile() == FILE_MEMORY_SHARED) {
- // ATOM_CAS and ATOM_EXCH are handled in handleSharedATOM().
- return false;
+ if (targ->getChipset() < NVISA_GM107_CHIPSET) {
+ if (cas->src(0).getFile() == FILE_MEMORY_SHARED) {
+ // ATOM_CAS and ATOM_EXCH are handled in handleSharedATOM().
+ return false;
+ }
}
if (cas->subOp != NV50_IR_SUBOP_ATOM_CAS &&
}
inline Value *
-NVC0LoweringPass::loadResInfo32(Value *ptr, uint32_t off)
+NVC0LoweringPass::loadResInfo32(Value *ptr, uint32_t off, uint16_t base)
{
- uint8_t b = prog->driver->io.resInfoCBSlot;
- off += prog->driver->io.suInfoBase;
+ uint8_t b = prog->driver->io.auxCBSlot;
+ off += base;
+
return bld.
mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U32, off), ptr);
}
inline Value *
-NVC0LoweringPass::loadResInfo64(Value *ptr, uint32_t off)
+NVC0LoweringPass::loadResInfo64(Value *ptr, uint32_t off, uint16_t base)
{
- uint8_t b = prog->driver->io.resInfoCBSlot;
- off += prog->driver->io.suInfoBase;
+ uint8_t b = prog->driver->io.auxCBSlot;
+ off += base;
if (ptr)
ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getScratch(), ptr, bld.mkImm(4));
}
inline Value *
-NVC0LoweringPass::loadResLength32(Value *ptr, uint32_t off)
+NVC0LoweringPass::loadResLength32(Value *ptr, uint32_t off, uint16_t base)
{
- uint8_t b = prog->driver->io.resInfoCBSlot;
- off += prog->driver->io.suInfoBase;
+ uint8_t b = prog->driver->io.auxCBSlot;
+ off += base;
if (ptr)
ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getScratch(), ptr, bld.mkImm(4));
mkLoadv(TYPE_U32, bld.mkSymbol(FILE_MEMORY_CONST, b, TYPE_U64, off + 8), ptr);
}
+inline Value *
+NVC0LoweringPass::loadBufInfo64(Value *ptr, uint32_t off)
+{
+ return loadResInfo64(ptr, off, prog->driver->io.bufInfoBase);
+}
+
+inline Value *
+NVC0LoweringPass::loadBufLength32(Value *ptr, uint32_t off)
+{
+ return loadResLength32(ptr, off, prog->driver->io.bufInfoBase);
+}
+
+inline Value *
+NVC0LoweringPass::loadUboInfo64(Value *ptr, uint32_t off)
+{
+ return loadResInfo64(ptr, off, prog->driver->io.uboInfoBase);
+}
+
+inline Value *
+NVC0LoweringPass::loadUboLength32(Value *ptr, uint32_t off)
+{
+ return loadResLength32(ptr, off, prog->driver->io.uboInfoBase);
+}
+
inline Value *
NVC0LoweringPass::loadMsInfo32(Value *ptr, uint32_t off)
{
/* On nvc0, surface info is obtained via the surface binding points passed
* to the SULD/SUST instructions.
* On nve4, surface info is stored in c[] and is used by various special
- * instructions, e.g. for clamping coordiantes or generating an address.
+ * instructions, e.g. for clamping coordinates or generating an address.
* They couldn't just have added an equivalent to TIC now, couldn't they ?
*/
-#define NVE4_SU_INFO_ADDR 0x00
-#define NVE4_SU_INFO_FMT 0x04
-#define NVE4_SU_INFO_DIM_X 0x08
-#define NVE4_SU_INFO_PITCH 0x0c
-#define NVE4_SU_INFO_DIM_Y 0x10
-#define NVE4_SU_INFO_ARRAY 0x14
-#define NVE4_SU_INFO_DIM_Z 0x18
-#define NVE4_SU_INFO_UNK1C 0x1c
-#define NVE4_SU_INFO_WIDTH 0x20
-#define NVE4_SU_INFO_HEIGHT 0x24
-#define NVE4_SU_INFO_DEPTH 0x28
-#define NVE4_SU_INFO_TARGET 0x2c
-#define NVE4_SU_INFO_CALL 0x30
-#define NVE4_SU_INFO_RAW_X 0x34
-#define NVE4_SU_INFO_MS_X 0x38
-#define NVE4_SU_INFO_MS_Y 0x3c
-
-#define NVE4_SU_INFO__STRIDE 0x40
-
-#define NVE4_SU_INFO_DIM(i) (0x08 + (i) * 8)
-#define NVE4_SU_INFO_SIZE(i) (0x20 + (i) * 4)
-#define NVE4_SU_INFO_MS(i) (0x38 + (i) * 4)
+#define NVC0_SU_INFO_ADDR 0x00
+#define NVC0_SU_INFO_FMT 0x04
+#define NVC0_SU_INFO_DIM_X 0x08
+#define NVC0_SU_INFO_PITCH 0x0c
+#define NVC0_SU_INFO_DIM_Y 0x10
+#define NVC0_SU_INFO_ARRAY 0x14
+#define NVC0_SU_INFO_DIM_Z 0x18
+#define NVC0_SU_INFO_UNK1C 0x1c
+#define NVC0_SU_INFO_WIDTH 0x20
+#define NVC0_SU_INFO_HEIGHT 0x24
+#define NVC0_SU_INFO_DEPTH 0x28
+#define NVC0_SU_INFO_TARGET 0x2c
+#define NVC0_SU_INFO_BSIZE 0x30
+#define NVC0_SU_INFO_RAW_X 0x34
+#define NVC0_SU_INFO_MS_X 0x38
+#define NVC0_SU_INFO_MS_Y 0x3c
+
+#define NVC0_SU_INFO__STRIDE 0x40
+
+#define NVC0_SU_INFO_DIM(i) (0x08 + (i) * 8)
+#define NVC0_SU_INFO_SIZE(i) (0x20 + (i) * 4)
+#define NVC0_SU_INFO_MS(i) (0x38 + (i) * 4)
+
+inline Value *
+NVC0LoweringPass::loadSuInfo32(Value *ptr, int slot, uint32_t off, bool bindless)
+{
+ uint32_t base = slot * NVC0_SU_INFO__STRIDE;
+
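+ // Each surface slot owns a NVC0_SU_INFO__STRIDE (0x40) byte record, hence
+ // the shift by 6 below; indirect indices are clamped to 512 records for
+ // bindless handles and 8 for bound images.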
+ if (ptr) {
+ ptr = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getSSA(), ptr, bld.mkImm(slot));
+ if (bindless)
+ ptr = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), ptr, bld.mkImm(511));
+ else
+ ptr = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), ptr, bld.mkImm(7));
+ ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(), ptr, bld.mkImm(6));
+ base = 0;
+ }
+ off += base;
+
+ return loadResInfo32(ptr, off, bindless ? prog->driver->io.bindlessBase :
+ prog->driver->io.suInfoBase);
+}
static inline uint16_t getSuClampSubOp(const TexInstruction *su, int c)
{
}
}
+bool
+NVC0LoweringPass::handleSUQ(TexInstruction *suq)
+{
+ int mask = suq->tex.mask;
+ int dim = suq->tex.target.getDim();
+ int arg = dim + (suq->tex.target.isArray() || suq->tex.target.isCube());
+ Value *ind = suq->getIndirectR();
+ int slot = suq->tex.r;
+ int c, d;
+
+ for (c = 0, d = 0; c < 3; ++c, mask >>= 1) {
+ if (c >= arg || !(mask & 1))
+ continue;
+
+ int offset;
+
+ if (c == 1 && suq->tex.target == TEX_TARGET_1D_ARRAY) {
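+ // the layer count of a 1D array lives in the Z size slot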
+ offset = NVC0_SU_INFO_SIZE(2);
+ } else {
+ offset = NVC0_SU_INFO_SIZE(c);
+ }
+ bld.mkMov(suq->getDef(d++), loadSuInfo32(ind, slot, offset, suq->tex.bindless));
+ if (c == 2 && suq->tex.target.isCube())
+ bld.mkOp2(OP_DIV, TYPE_U32, suq->getDef(d - 1), suq->getDef(d - 1),
+ bld.loadImm(NULL, 6));
+ }
+
+ if (mask & 1) {
+ if (suq->tex.target.isMS()) {
+ Value *ms_x = loadSuInfo32(ind, slot, NVC0_SU_INFO_MS(0), suq->tex.bindless);
+ Value *ms_y = loadSuInfo32(ind, slot, NVC0_SU_INFO_MS(1), suq->tex.bindless);
+ Value *ms = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getScratch(), ms_x, ms_y);
+ bld.mkOp2(OP_SHL, TYPE_U32, suq->getDef(d++), bld.loadImm(NULL, 1), ms);
+ } else {
+ bld.mkMov(suq->getDef(d++), bld.loadImm(NULL, 1));
+ }
+ }
+
+ bld.remove(suq);
+ return true;
+}
+
void
NVC0LoweringPass::adjustCoordinatesMS(TexInstruction *tex)
{
- const uint16_t base = tex->tex.r * NVE4_SU_INFO__STRIDE;
const int arg = tex->tex.target.getArgCount();
+ int slot = tex->tex.r;
if (tex->tex.target == TEX_TARGET_2D_MS)
tex->tex.target = TEX_TARGET_2D;
Value *s = tex->getSrc(arg - 1);
Value *tx = bld.getSSA(), *ty = bld.getSSA(), *ts = bld.getSSA();
+ Value *ind = tex->getIndirectR();
- Value *ms_x = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(0));
- Value *ms_y = loadResInfo32(NULL, base + NVE4_SU_INFO_MS(1));
+ Value *ms_x = loadSuInfo32(ind, slot, NVC0_SU_INFO_MS(0), tex->tex.bindless);
+ Value *ms_y = loadSuInfo32(ind, slot, NVC0_SU_INFO_MS(1), tex->tex.bindless);
bld.mkOp2(OP_SHL, TYPE_U32, tx, x, ms_x);
bld.mkOp2(OP_SHL, TYPE_U32, ty, y, ms_y);
const bool atom = su->op == OP_SUREDB || su->op == OP_SUREDP;
const bool raw =
su->op == OP_SULDB || su->op == OP_SUSTB || su->op == OP_SUREDB;
- const int idx = su->tex.r;
+ const int slot = su->tex.r;
const int dim = su->tex.target.getDim();
- const int arg = dim + (su->tex.target.isArray() ? 1 : 0);
- const uint16_t base = idx * NVE4_SU_INFO__STRIDE;
+ const int arg = dim + (su->tex.target.isArray() || su->tex.target.isCube());
int c;
Value *zero = bld.mkImm(0);
Value *p1 = NULL;
Value *src[3];
Value *bf, *eau, *off;
Value *addr, *pred;
+ Value *ind = su->getIndirectR();
off = bld.getScratch(4);
bf = bld.getScratch(4);
// calculate clamped coordinates
for (c = 0; c < arg; ++c) {
+ int dimc = c;
+
+ if (c == 1 && su->tex.target == TEX_TARGET_1D_ARRAY) {
+ // The array index is stored in the Z component for 1D arrays.
+ dimc = 2;
+ }
+
src[c] = bld.getScratch();
if (c == 0 && raw)
- v = loadResInfo32(NULL, base + NVE4_SU_INFO_RAW_X);
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_RAW_X, su->tex.bindless);
else
- v = loadResInfo32(NULL, base + NVE4_SU_INFO_DIM(c));
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_DIM(dimc), su->tex.bindless);
bld.mkOp3(OP_SUCLAMP, TYPE_S32, src[c], su->getSrc(c), v, zero)
- ->subOp = getSuClampSubOp(su, c);
+ ->subOp = getSuClampSubOp(su, dimc);
}
for (; c < 3; ++c)
src[c] = zero;
if (su->tex.target == TEX_TARGET_BUFFER) {
src[0]->getInsn()->setFlagsDef(1, pred);
} else
- if (su->tex.target.isArray()) {
+ if (su->tex.target.isArray() || su->tex.target.isCube()) {
p1 = bld.getSSA(1, FILE_PREDICATE);
src[dim]->getInsn()->setFlagsDef(1, p1);
}
bld.mkOp2(OP_AND, TYPE_U32, off, src[0], bld.loadImm(NULL, 0xffff));
} else
if (dim == 3) {
- v = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_UNK1C, su->tex.bindless);
bld.mkOp3(OP_MADSP, TYPE_U32, off, src[2], v, src[1])
->subOp = NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l
- v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_PITCH, su->tex.bindless);
bld.mkOp3(OP_MADSP, TYPE_U32, off, off, v, src[0])
->subOp = NV50_IR_SUBOP_MADSP(0,2,8); // u32 u16l u16l
} else {
assert(dim == 2);
- v = loadResInfo32(NULL, base + NVE4_SU_INFO_PITCH);
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_PITCH, su->tex.bindless);
bld.mkOp3(OP_MADSP, TYPE_U32, off, src[1], v, src[0])
- ->subOp = su->tex.target.isArray() ?
+ ->subOp = (su->tex.target.isArray() || su->tex.target.isCube()) ?
NV50_IR_SUBOP_MADSP_SD : NV50_IR_SUBOP_MADSP(4,2,8); // u16l u16l u16l
}
if (raw) {
bf = src[0];
} else {
- v = loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_FMT, su->tex.bindless);
bld.mkOp3(OP_VSHL, TYPE_U32, bf, src[0], v, zero)
->subOp = NV50_IR_SUBOP_V1(7,6,8|2);
}
break;
case 2:
z = off;
- if (!su->tex.target.isArray()) {
- z = loadResInfo32(NULL, base + NVE4_SU_INFO_UNK1C);
+ if (!su->tex.target.isArray() && !su->tex.target.isCube()) {
+ z = loadSuInfo32(ind, slot, NVC0_SU_INFO_UNK1C, su->tex.bindless);
subOp = NV50_IR_SUBOP_SUBFM_3D;
}
break;
}
// part 2
- v = loadResInfo32(NULL, base + NVE4_SU_INFO_ADDR);
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_ADDR, su->tex.bindless);
if (su->tex.target == TEX_TARGET_BUFFER) {
eau = v;
eau = bld.mkOp3v(OP_SUEAU, TYPE_U32, bld.getScratch(4), off, bf, v);
}
// add array layer offset
- if (su->tex.target.isArray()) {
- v = loadResInfo32(NULL, base + NVE4_SU_INFO_ARRAY);
+ if (su->tex.target.isArray() || su->tex.target.isCube()) {
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_ARRAY, su->tex.bindless);
if (dim == 1)
bld.mkOp3(OP_MADSP, TYPE_U32, eau, src[1], v, eau)
->subOp = NV50_IR_SUBOP_MADSP(4,0,0); // u16 u24 u32
// let's just set it to 0 for raw access and hope it works
v = raw ?
- bld.mkImm(0) : loadResInfo32(NULL, base + NVE4_SU_INFO_FMT);
+ bld.mkImm(0) : loadSuInfo32(ind, slot, NVC0_SU_INFO_FMT, su->tex.bindless);
// get rid of old coordinate sources, make space for fmt info and predicate
su->moveSources(arg, 3 - arg);
su->setSrc(0, addr);
su->setSrc(1, v);
su->setSrc(2, pred);
+ su->setIndirectR(NULL);
+
+ // prevent read fault when the image is not actually bound
+ CmpInstruction *pred1 =
+ bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
+ TYPE_U32, bld.mkImm(0),
+ loadSuInfo32(ind, slot, NVC0_SU_INFO_ADDR, su->tex.bindless));
+
+ if (su->op != OP_SUSTP && su->tex.format) {
+ const TexInstruction::ImgFormatDesc *format = su->tex.format;
+ int blockwidth = format->bits[0] + format->bits[1] +
+ format->bits[2] + format->bits[3];
+
+ // make sure that the format doesn't mismatch
+ assert(format->components != 0);
+ bld.mkCmp(OP_SET_OR, CC_NE, TYPE_U32, pred1->getDef(0),
+ TYPE_U32, bld.loadImm(NULL, blockwidth / 8),
+ loadSuInfo32(ind, slot, NVC0_SU_INFO_BSIZE, su->tex.bindless),
+ pred1->getDef(0));
+ }
+ su->setPredicate(CC_NOT_P, pred1->getDef(0));
+
+ // TODO: initialize def values to 0 when the surface operation is not
+ // performed (not needed for stores). Also, fix the "address bounds test"
+ // subtests from arb_shader_image_load_store-invalid for buffers, because it
+ // seems like the predicate is not correctly set by suclamp.
+}
+
+static DataType
+getSrcType(const TexInstruction::ImgFormatDesc *t, int c)
+{
+ switch (t->type) {
+ case FLOAT: return t->bits[c] == 16 ? TYPE_F16 : TYPE_F32;
+ case UNORM: return t->bits[c] == 8 ? TYPE_U8 : TYPE_U16;
+ case SNORM: return t->bits[c] == 8 ? TYPE_S8 : TYPE_S16;
+ case UINT:
+ return (t->bits[c] == 8 ? TYPE_U8 :
+ (t->bits[c] == 16 ? TYPE_U16 : TYPE_U32));
+ case SINT:
+ return (t->bits[c] == 8 ? TYPE_S8 :
+ (t->bits[c] == 16 ? TYPE_S16 : TYPE_S32));
+ }
+ return TYPE_NONE;
+}
+
+static DataType
+getDestType(const ImgType type) {
+ switch (type) {
+ case FLOAT:
+ case UNORM:
+ case SNORM:
+ return TYPE_F32;
+ case UINT:
+ return TYPE_U32;
+ case SINT:
+ return TYPE_S32;
+ default:
+ assert(!"Impossible type");
+ return TYPE_NONE;
+ }
}
void
-NVC0LoweringPass::handleSurfaceOpNVE4(TexInstruction *su)
+NVC0LoweringPass::convertSurfaceFormat(TexInstruction *su)
{
- processSurfaceCoordsNVE4(su);
+ const TexInstruction::ImgFormatDesc *format = su->tex.format;
+ int width = format->bits[0] + format->bits[1] +
+ format->bits[2] + format->bits[3];
+ Value *untypedDst[4] = {};
+ Value *typedDst[4] = {};
- // Who do we hate more ? The person who decided that nvc0's SULD doesn't
- // have to support conversion or the person who decided that, in OpenCL,
- // you don't have to specify the format here like you do in OpenGL ?
-
- if (su->op == OP_SULDP) {
- // We don't patch shaders. Ever.
- // You get an indirect call to our library blob here.
- // But at least it's uniform.
- FlowInstruction *call;
- LValue *p[3];
- LValue *r[5];
- uint16_t base = su->tex.r * NVE4_SU_INFO__STRIDE + NVE4_SU_INFO_CALL;
-
- for (int i = 0; i < 4; ++i)
- (r[i] = bld.getScratch(4, FILE_GPR))->reg.data.id = i;
- for (int i = 0; i < 3; ++i)
- (p[i] = bld.getScratch(1, FILE_PREDICATE))->reg.data.id = i;
- (r[4] = bld.getScratch(8, FILE_GPR))->reg.data.id = 4;
-
- bld.mkMov(p[1], bld.mkImm((su->cache == CACHE_CA) ? 1 : 0), TYPE_U8);
- bld.mkMov(p[2], bld.mkImm((su->cache == CACHE_CG) ? 1 : 0), TYPE_U8);
- bld.mkMov(p[0], su->getSrc(2), TYPE_U8);
- bld.mkMov(r[4], su->getSrc(0), TYPE_U64);
- bld.mkMov(r[2], su->getSrc(1), TYPE_U32);
-
- call = bld.mkFlow(OP_CALL, NULL, su->cc, su->getPredicate());
-
- call->indirect = 1;
- call->absolute = 1;
- call->setSrc(0, bld.mkSymbol(FILE_MEMORY_CONST,
- prog->driver->io.resInfoCBSlot, TYPE_U32,
- prog->driver->io.suInfoBase + base));
- call->setSrc(1, r[2]);
- call->setSrc(2, r[4]);
- for (int i = 0; i < 3; ++i)
- call->setSrc(3 + i, p[i]);
- for (int i = 0; i < 4; ++i) {
- call->setDef(i, r[i]);
- bld.mkMov(su->getDef(i), r[i]);
+ // We must convert this to a generic load.
+ su->op = OP_SULDB;
+
+ su->dType = typeOfSize(width / 8);
+ su->sType = TYPE_U8;
+
+ for (int i = 0; i < width / 32; i++)
+ untypedDst[i] = bld.getSSA();
+ if (width < 32)
+ untypedDst[0] = bld.getSSA();
+
+ for (int i = 0; i < 4; i++) {
+ typedDst[i] = su->getDef(i);
+ }
+
+ // Set the untyped dsts as the su's destinations
+ for (int i = 0; i < 4; i++)
+ su->setDef(i, untypedDst[i]);
+
+ bld.setPosition(su, true);
+
+ // Unpack each component into the typed dsts
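+ // (e.g. an RGBA8 UNORM load reads one untyped 32-bit value, CVTs each
+ // byte out with subOp = i, then scales by 1.0f / 255)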
+ int bits = 0;
+ for (int i = 0; i < 4; bits += format->bits[i], i++) {
+ if (!typedDst[i])
+ continue;
+ if (i >= format->components) {
+ if (format->type == FLOAT ||
+ format->type == UNORM ||
+ format->type == SNORM)
+ bld.loadImm(typedDst[i], i == 3 ? 1.0f : 0.0f);
+ else
+ bld.loadImm(typedDst[i], i == 3 ? 1 : 0);
+ continue;
}
- call->setDef(4, p[1]);
- delete_Instruction(bld.getProgram(), su);
+
+ // Get just that component's data into the relevant place
+ if (format->bits[i] == 32)
+ bld.mkMov(typedDst[i], untypedDst[i]);
+ else if (format->bits[i] == 16)
+ bld.mkCvt(OP_CVT, getDestType(format->type), typedDst[i],
+ getSrcType(format, i), untypedDst[i / 2])
+ ->subOp = (i & 1) << (format->type == FLOAT ? 0 : 1);
+ else if (format->bits[i] == 8)
+ bld.mkCvt(OP_CVT, getDestType(format->type), typedDst[i],
+ getSrcType(format, i), untypedDst[0])->subOp = i;
+ else {
+ bld.mkOp2(OP_EXTBF, TYPE_U32, typedDst[i], untypedDst[bits / 32],
+ bld.mkImm((bits % 32) | (format->bits[i] << 8)));
+ if (format->type == UNORM || format->type == SNORM)
+ bld.mkCvt(OP_CVT, TYPE_F32, typedDst[i], getSrcType(format, i), typedDst[i]);
+ }
+
+ // Normalize / convert as necessary
+ if (format->type == UNORM)
+ bld.mkOp2(OP_MUL, TYPE_F32, typedDst[i], typedDst[i], bld.loadImm(NULL, 1.0f / ((1 << format->bits[i]) - 1)));
+ else if (format->type == SNORM)
+ bld.mkOp2(OP_MUL, TYPE_F32, typedDst[i], typedDst[i], bld.loadImm(NULL, 1.0f / ((1 << (format->bits[i] - 1)) - 1)));
+ else if (format->type == FLOAT && format->bits[i] < 16) {
+ bld.mkOp2(OP_SHL, TYPE_U32, typedDst[i], typedDst[i], bld.loadImm(NULL, 15 - format->bits[i]));
+ bld.mkCvt(OP_CVT, TYPE_F32, typedDst[i], TYPE_F16, typedDst[i]);
+ }
+ }
+
+ if (format->bgra) {
+ std::swap(typedDst[0], typedDst[2]);
}
+}
+
+void
+NVC0LoweringPass::handleSurfaceOpNVE4(TexInstruction *su)
+{
+ processSurfaceCoordsNVE4(su);
+
+ if (su->op == OP_SULDP)
+ convertSurfaceFormat(su);
if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
- // FIXME: for out of bounds access, destination value will be undefined !
- Value *pred = su->getSrc(2);
- CondCode cc = CC_NOT_P;
- if (su->getPredicate()) {
- pred = bld.getScratch(1, FILE_PREDICATE);
- cc = su->cc;
- if (cc == CC_NOT_P) {
- bld.mkOp2(OP_OR, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
- } else {
- bld.mkOp2(OP_AND, TYPE_U8, pred, su->getPredicate(), su->getSrc(2));
- pred->getInsn()->src(1).mod = Modifier(NV50_IR_MOD_NOT);
- }
- }
- Instruction *red = bld.mkOp(OP_ATOM, su->dType, su->getDef(0));
+ assert(su->getPredicate());
+ Value *pred =
+ bld.mkOp2v(OP_OR, TYPE_U8, bld.getScratch(1, FILE_PREDICATE),
+ su->getPredicate(), su->getSrc(2));
+
+ Instruction *red = bld.mkOp(OP_ATOM, su->dType, bld.getSSA());
red->subOp = su->subOp;
- if (!gMemBase)
- gMemBase = bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0);
- red->setSrc(0, gMemBase);
+ red->setSrc(0, bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, 0));
red->setSrc(1, su->getSrc(3));
if (su->subOp == NV50_IR_SUBOP_ATOM_CAS)
red->setSrc(2, su->getSrc(4));
red->setIndirect(0, 0, su->getSrc(0));
- red->setPredicate(cc, pred);
+
+ // make sure to initialize dst value when the atomic operation is not
+ // performed
+ Instruction *mov = bld.mkMov(bld.getSSA(), bld.loadImm(NULL, 0));
+
+ assert(su->cc == CC_NOT_P);
+ red->setPredicate(su->cc, pred);
+ mov->setPredicate(CC_P, pred);
+
+ bld.mkOp2(OP_UNION, TYPE_U32, su->getDef(0),
+ red->getDef(0), mov->getDef(0));
+
delete_Instruction(bld.getProgram(), su);
handleCasExch(red, true);
- } else {
+ }
+
+ if (su->op == OP_SUSTB || su->op == OP_SUSTP)
su->sType = (su->tex.target == TEX_TARGET_BUFFER) ? TYPE_U32 : TYPE_U8;
+}
+
+void
+NVC0LoweringPass::processSurfaceCoordsNVC0(TexInstruction *su)
+{
+ const int slot = su->tex.r;
+ const int dim = su->tex.target.getDim();
+ const int arg = dim + (su->tex.target.isArray() || su->tex.target.isCube());
+ int c;
+ Value *zero = bld.mkImm(0);
+ Value *src[3];
+ Value *v;
+ Value *ind = su->getIndirectR();
+
+ bld.setPosition(su, false);
+
+ adjustCoordinatesMS(su);
+
+ if (ind) {
+ Value *ptr;
+ ptr = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getSSA(), ind, bld.mkImm(su->tex.r));
+ ptr = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), ptr, bld.mkImm(7));
+ su->setIndirectR(ptr);
+ }
+
+ // get surface coordinates
+ for (c = 0; c < arg; ++c)
+ src[c] = su->getSrc(c);
+ for (; c < 3; ++c)
+ src[c] = zero;
+
+ // calculate pixel offset
+ if (su->op == OP_SULDP || su->op == OP_SUREDP) {
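+ // formatted accesses take x in texels, so scale it by the per-pixel byte
+ // size (BSIZE) to get a byte offset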
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_BSIZE, su->tex.bindless);
+ su->setSrc(0, bld.mkOp2v(OP_MUL, TYPE_U32, bld.getSSA(), src[0], v));
+ }
+
+ // add array layer offset
+ if (su->tex.target.isArray() || su->tex.target.isCube()) {
+ v = loadSuInfo32(ind, slot, NVC0_SU_INFO_ARRAY, su->tex.bindless);
+ assert(dim > 1);
+ su->setSrc(2, bld.mkOp2v(OP_MUL, TYPE_U32, bld.getSSA(), src[2], v));
+ }
+
+ // prevent read fault when the image is not actually bound
+ CmpInstruction *pred =
+ bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
+ TYPE_U32, bld.mkImm(0),
+ loadSuInfo32(ind, slot, NVC0_SU_INFO_ADDR, su->tex.bindless));
+ if (su->op != OP_SUSTP && su->tex.format) {
+ const TexInstruction::ImgFormatDesc *format = su->tex.format;
+ int blockwidth = format->bits[0] + format->bits[1] +
+ format->bits[2] + format->bits[3];
+
+ assert(format->components != 0);
+ // make sure that the format doesn't mismatch when it's not FMT_NONE
+ bld.mkCmp(OP_SET_OR, CC_NE, TYPE_U32, pred->getDef(0),
+ TYPE_U32, bld.loadImm(NULL, blockwidth / 8),
+ loadSuInfo32(ind, slot, NVC0_SU_INFO_BSIZE, su->tex.bindless),
+ pred->getDef(0));
+ }
+ su->setPredicate(CC_NOT_P, pred->getDef(0));
+}
+
+void
+NVC0LoweringPass::handleSurfaceOpNVC0(TexInstruction *su)
+{
+ if (su->tex.target == TEX_TARGET_1D_ARRAY) {
+ /* As 1d arrays also need 3 coordinates, switching to TEX_TARGET_2D_ARRAY
+ * will simplify the lowering pass and the texture constraints. */
+ su->moveSources(1, 1);
+ su->setSrc(1, bld.loadImm(NULL, 0));
+ su->tex.target = TEX_TARGET_2D_ARRAY;
+ }
+
+ processSurfaceCoordsNVC0(su);
+
+ if (su->op == OP_SULDP)
+ convertSurfaceFormat(su);
+
+ if (su->op == OP_SUREDB || su->op == OP_SUREDP) {
+ const int dim = su->tex.target.getDim();
+ const int arg = dim + (su->tex.target.isArray() || su->tex.target.isCube());
+ LValue *addr = bld.getSSA(8);
+ Value *def = su->getDef(0);
+
+ su->op = OP_SULEA;
+
+ // Set the destination to the address
+ su->dType = TYPE_U64;
+ su->setDef(0, addr);
+ su->setDef(1, su->getPredicate());
+
+ bld.setPosition(su, true);
+
+ // Perform the atomic op
+ Instruction *red = bld.mkOp(OP_ATOM, su->sType, bld.getSSA());
+ red->subOp = su->subOp;
+ red->setSrc(0, bld.mkSymbol(FILE_MEMORY_GLOBAL, 0, su->sType, 0));
+ red->setSrc(1, su->getSrc(arg));
+ if (red->subOp == NV50_IR_SUBOP_ATOM_CAS)
+ red->setSrc(2, su->getSrc(arg + 1));
+ red->setIndirect(0, 0, addr);
+
+ // make sure to initialize dst value when the atomic operation is not
+ // performed
+ Instruction *mov = bld.mkMov(bld.getSSA(), bld.loadImm(NULL, 0));
+
+ assert(su->cc == CC_NOT_P);
+ red->setPredicate(su->cc, su->getPredicate());
+ mov->setPredicate(CC_P, su->getPredicate());
+
+ bld.mkOp2(OP_UNION, TYPE_U32, def, red->getDef(0), mov->getDef(0));
+
+ handleCasExch(red, false);
+ }
+}
+
+void
+NVC0LoweringPass::processSurfaceCoordsGM107(TexInstruction *su)
+{
+ const int slot = su->tex.r;
+ const int dim = su->tex.target.getDim();
+ const int arg = dim + (su->tex.target.isArray() || su->tex.target.isCube());
+ Value *ind = su->getIndirectR();
+ Value *handle;
+ int pos = 0;
+
+ bld.setPosition(su, false);
+
+ // add texture handle
+ switch (su->op) {
+ case OP_SUSTP:
+ pos = 4;
+ break;
+ case OP_SUREDP:
+ pos = (su->subOp == NV50_IR_SUBOP_ATOM_CAS) ? 2 : 1;
+ break;
+ default:
+ assert(pos == 0);
+ break;
+ }
+ if (su->tex.bindless)
+ handle = ind;
+ else
+ handle = loadTexHandle(ind, slot + 32);
+ su->setSrc(arg + pos, handle);
+
+ // The address check doesn't make sense here. The format check could make
+ // sense but it's a bit of a pain.
+ if (su->tex.bindless)
+ return;
+
+ // prevent read fault when the image is not actually bound
+ CmpInstruction *pred =
+ bld.mkCmp(OP_SET, CC_EQ, TYPE_U32, bld.getSSA(1, FILE_PREDICATE),
+ TYPE_U32, bld.mkImm(0),
+ loadSuInfo32(ind, slot, NVC0_SU_INFO_ADDR, su->tex.bindless));
+ if (su->op != OP_SUSTP && su->tex.format) {
+ const TexInstruction::ImgFormatDesc *format = su->tex.format;
+ int blockwidth = format->bits[0] + format->bits[1] +
+ format->bits[2] + format->bits[3];
+
+ assert(format->components != 0);
+ // make sure that the format doesn't mismatch when it's not FMT_NONE
+ bld.mkCmp(OP_SET_OR, CC_NE, TYPE_U32, pred->getDef(0),
+ TYPE_U32, bld.loadImm(NULL, blockwidth / 8),
+ loadSuInfo32(ind, slot, NVC0_SU_INFO_BSIZE, su->tex.bindless),
+ pred->getDef(0));
+ }
+ su->setPredicate(CC_NOT_P, pred->getDef(0));
+}
+
+void
+NVC0LoweringPass::handleSurfaceOpGM107(TexInstruction *su)
+{
+ processSurfaceCoordsGM107(su);
+
+ if (su->op == OP_SULDP)
+ convertSurfaceFormat(su);
+
+ if (su->op == OP_SUREDP) {
+ Value *def = su->getDef(0);
+
+ su->op = OP_SUREDB;
+
+ // There may not be a predicate in the bindless case.
+ if (su->getPredicate()) {
+ su->setDef(0, bld.getSSA());
+
+ bld.setPosition(su, true);
+
+ // make sure to initialize dst value when the atomic operation is not
+ // performed
+ Instruction *mov = bld.mkMov(bld.getSSA(), bld.loadImm(NULL, 0));
+
+ assert(su->cc == CC_NOT_P);
+ mov->setPredicate(CC_P, su->getPredicate());
+
+ bld.mkOp2(OP_UNION, TYPE_U32, def, su->getDef(0), mov->getDef(0));
+ }
}
}
return true;
}
+void
+NVC0LoweringPass::handleLDST(Instruction *i)
+{
+ if (i->src(0).getFile() == FILE_SHADER_INPUT) {
+ if (prog->getType() == Program::TYPE_COMPUTE) {
+ i->getSrc(0)->reg.file = FILE_MEMORY_CONST;
+ i->getSrc(0)->reg.fileIndex = 0;
+ } else
+ if (prog->getType() == Program::TYPE_GEOMETRY &&
+ i->src(0).isIndirect(0)) {
+ // XXX: this assumes vec4 units
+ Value *ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
+ i->getIndirect(0, 0), bld.mkImm(4));
+ i->setIndirect(0, 0, ptr);
+ i->op = OP_VFETCH;
+ } else {
+ i->op = OP_VFETCH;
+ assert(prog->getType() != Program::TYPE_FRAGMENT); // INTERP
+ }
+ } else if (i->src(0).getFile() == FILE_MEMORY_CONST) {
+ if (targ->getChipset() >= NVISA_GK104_CHIPSET &&
+ prog->getType() == Program::TYPE_COMPUTE) {
+ // The launch descriptor only allows setting up 8 CBs, but OpenGL
+ // requires at least 12 UBOs. To bypass this limitation, we store the
+ // addrs into the driver constbuf and we directly load from the global
+ // memory.
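+ // (i.e. accesses to c1[] and up become global loads through the addresses
+ // stored at uboInfoBase; c0[] remains a real constbuf)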
+ int8_t fileIndex = i->getSrc(0)->reg.fileIndex - 1;
+ Value *ind = i->getIndirect(0, 1);
+
+ if (!ind && fileIndex == -1)
+ return;
+
+ if (ind) {
+ // Clamp the UBO index when an indirect access is used to avoid
+ // loading information from the wrong place in the driver cb.
+ // TODO - synchronize the max with the driver.
+ ind = bld.mkOp2v(OP_MIN, TYPE_U32, bld.getSSA(),
+ bld.mkOp2v(OP_ADD, TYPE_U32, bld.getSSA(),
+ ind, bld.loadImm(NULL, fileIndex)),
+ bld.loadImm(NULL, 13));
+ fileIndex = 0;
+ }
+
+ Value *offset = bld.loadImm(NULL, i->getSrc(0)->reg.data.offset + typeSizeof(i->sType));
+ Value *ptr = loadUboInfo64(ind, fileIndex * 16);
+ Value *length = loadUboLength32(ind, fileIndex * 16);
+ Value *pred = new_LValue(func, FILE_PREDICATE);
+ if (i->src(0).isIndirect(0)) {
+ bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
+ bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, i->getIndirect(0, 0));
+ }
+ i->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
+ i->setIndirect(0, 1, NULL);
+ i->setIndirect(0, 0, ptr);
+ bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
+ i->setPredicate(CC_NOT_P, pred);
+ Value *zero, *dst = i->getDef(0);
+ i->setDef(0, bld.getSSA());
+
+ bld.setPosition(i, true);
+ bld.mkMov((zero = bld.getSSA()), bld.mkImm(0))
+ ->setPredicate(CC_P, pred);
+ bld.mkOp2(OP_UNION, TYPE_U32, dst, i->getDef(0), zero);
+ } else if (i->src(0).isIndirect(1)) {
+ Value *ptr;
+ if (i->src(0).isIndirect(0))
+ ptr = bld.mkOp3v(OP_INSBF, TYPE_U32, bld.getSSA(),
+ i->getIndirect(0, 1), bld.mkImm(0x1010),
+ i->getIndirect(0, 0));
+ else
+ ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
+ i->getIndirect(0, 1), bld.mkImm(16));
+ i->setIndirect(0, 1, NULL);
+ i->setIndirect(0, 0, ptr);
+ i->subOp = NV50_IR_SUBOP_LDC_IS;
+ }
+ } else if (i->src(0).getFile() == FILE_SHADER_OUTPUT) {
+ assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
+ i->op = OP_VFETCH;
+ } else if (i->src(0).getFile() == FILE_MEMORY_BUFFER) {
+ Value *ind = i->getIndirect(0, 1);
+ Value *ptr = loadBufInfo64(ind, i->getSrc(0)->reg.fileIndex * 16);
+ // XXX come up with a way not to do this for EVERY little access but
+ // rather to batch these up somehow. Unfortunately we've lost the
+ // information about the field width by the time we get here.
+ Value *offset = bld.loadImm(NULL, i->getSrc(0)->reg.data.offset + typeSizeof(i->sType));
+ Value *length = loadBufLength32(ind, i->getSrc(0)->reg.fileIndex * 16);
+ Value *pred = new_LValue(func, FILE_PREDICATE);
+ if (i->src(0).isIndirect(0)) {
+ bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
+ bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, i->getIndirect(0, 0));
+ }
+ i->setIndirect(0, 1, NULL);
+ i->setIndirect(0, 0, ptr);
+ i->getSrc(0)->reg.file = FILE_MEMORY_GLOBAL;
+ bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
+ i->setPredicate(CC_NOT_P, pred);
+ if (i->defExists(0)) {
+ Value *zero, *dst = i->getDef(0);
+ i->setDef(0, bld.getSSA());
+
+ bld.setPosition(i, true);
+ bld.mkMov((zero = bld.getSSA()), bld.mkImm(0))
+ ->setPredicate(CC_P, pred);
+ bld.mkOp2(OP_UNION, TYPE_U32, dst, i->getDef(0), zero);
+ }
+ }
+}
+
void
NVC0LoweringPass::readTessCoord(LValue *dst, int c)
{
y = dst;
} else {
assert(c == 2);
+ if (prog->driver->prop.tp.domain != PIPE_PRIM_TRIANGLES) {
+ bld.mkMov(dst, bld.loadImm(NULL, 0));
+ return;
+ }
x = bld.getSSA();
y = bld.getSSA();
}
i->setSrc(0, bld.mkImm(sv == SV_GRIDID ? 0 : 1));
return true;
}
+ // Fallthrough
+ case SV_WORK_DIM:
addr += prog->driver->prop.cp.gridInfoBase;
bld.mkLoad(TYPE_U32, i->getDef(0),
- bld.mkSymbol(FILE_MEMORY_CONST, 0, TYPE_U32, addr), NULL);
+ bld.mkSymbol(FILE_MEMORY_CONST, prog->driver->io.auxCBSlot,
+ TYPE_U32, addr), NULL);
break;
case SV_SAMPLE_INDEX:
// TODO: Properly pass source as an address in the PIX address space
bld.mkLoad(TYPE_F32,
i->getDef(0),
bld.mkSymbol(
- FILE_MEMORY_CONST, prog->driver->io.resInfoCBSlot,
+ FILE_MEMORY_CONST, prog->driver->io.auxCBSlot,
TYPE_U32, prog->driver->io.sampleInfoBase +
4 * sym->reg.data.sv.index),
off);
break;
}
- case SV_SAMPLE_MASK:
+ case SV_SAMPLE_MASK: {
ld = bld.mkOp1(OP_PIXLD, TYPE_U32, i->getDef(0), bld.mkImm(0));
ld->subOp = NV50_IR_SUBOP_PIXLD_COVMASK;
+ Instruction *sampleid =
+ bld.mkOp1(OP_PIXLD, TYPE_U32, bld.getSSA(), bld.mkImm(0));
+ sampleid->subOp = NV50_IR_SUBOP_PIXLD_SAMPLEID;
+ Value *masked =
+ bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), ld->getDef(0),
+ bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
+ bld.loadImm(NULL, 1), sampleid->getDef(0)));
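+ // gl_SampleMaskIn must contain only the current sample's bit when the
+ // shader is run per-sample; otherwise the full coverage mask is used.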
+ if (prog->driver->prop.fp.persampleInvocation) {
+ bld.mkMov(i->getDef(0), masked);
+ } else {
+ bld.mkOp3(OP_SELP, TYPE_U32, i->getDef(0), ld->getDef(0), masked,
+ bld.mkImm(0))
+ ->subOp = 1;
+ }
break;
+ }
case SV_BASEVERTEX:
case SV_BASEINSTANCE:
case SV_DRAWID:
default:
if (prog->getType() == Program::TYPE_TESSELLATION_EVAL && !i->perPatch)
vtx = bld.mkOp1v(OP_PFETCH, TYPE_U32, bld.getSSA(), bld.mkImm(0));
- ld = bld.mkFetch(i->getDef(0), i->dType,
- FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
- ld->perPatch = i->perPatch;
+ if (prog->getType() == Program::TYPE_FRAGMENT) {
+ bld.mkInterp(NV50_IR_INTERP_FLAT, i->getDef(0), addr, NULL);
+ } else {
+ ld = bld.mkFetch(i->getDef(0), i->dType,
+ FILE_SHADER_INPUT, addr, i->getIndirect(0, 0), vtx);
+ ld->perPatch = i->perPatch;
+ }
break;
}
bld.getBB()->remove(i);
{
if (i->dType == TYPE_F64) {
Value *pred = bld.getSSA(1, FILE_PREDICATE);
- Value *zero = bld.loadImm(NULL, 0);
+ Value *zero = bld.loadImm(NULL, 0.0);
Value *dst = bld.getSSA(8);
bld.mkOp1(OP_RSQ, i->dType, dst, i->getSrc(0));
bld.mkCmp(OP_SET, CC_LE, i->dType, pred, i->dType, i->getSrc(0), zero);
return handleWRSV(i);
case OP_STORE:
case OP_LOAD:
- if (i->src(0).getFile() == FILE_SHADER_INPUT) {
- if (prog->getType() == Program::TYPE_COMPUTE) {
- i->getSrc(0)->reg.file = FILE_MEMORY_CONST;
- i->getSrc(0)->reg.fileIndex = 0;
- } else
- if (prog->getType() == Program::TYPE_GEOMETRY &&
- i->src(0).isIndirect(0)) {
- // XXX: this assumes vec4 units
- Value *ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
- i->getIndirect(0, 0), bld.mkImm(4));
- i->setIndirect(0, 0, ptr);
- i->op = OP_VFETCH;
- } else {
- i->op = OP_VFETCH;
- assert(prog->getType() != Program::TYPE_FRAGMENT); // INTERP
- }
- } else if (i->src(0).getFile() == FILE_MEMORY_CONST) {
- if (i->src(0).isIndirect(1)) {
- Value *ptr;
- if (i->src(0).isIndirect(0))
- ptr = bld.mkOp3v(OP_INSBF, TYPE_U32, bld.getSSA(),
- i->getIndirect(0, 1), bld.mkImm(0x1010),
- i->getIndirect(0, 0));
- else
- ptr = bld.mkOp2v(OP_SHL, TYPE_U32, bld.getSSA(),
- i->getIndirect(0, 1), bld.mkImm(16));
- i->setIndirect(0, 1, NULL);
- i->setIndirect(0, 0, ptr);
- i->subOp = NV50_IR_SUBOP_LDC_IS;
- }
- } else if (i->src(0).getFile() == FILE_SHADER_OUTPUT) {
- assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
- i->op = OP_VFETCH;
- } else if (i->src(0).getFile() == FILE_MEMORY_GLOBAL) {
- Value *ind = i->getIndirect(0, 1);
- Value *ptr = loadResInfo64(ind, i->getSrc(0)->reg.fileIndex * 16);
- // XXX come up with a way not to do this for EVERY little access but
- // rather to batch these up somehow. Unfortunately we've lost the
- // information about the field width by the time we get here.
- Value *offset = bld.loadImm(NULL, i->getSrc(0)->reg.data.offset + typeSizeof(i->sType));
- Value *length = loadResLength32(ind, i->getSrc(0)->reg.fileIndex * 16);
- Value *pred = new_LValue(func, FILE_PREDICATE);
- if (i->src(0).isIndirect(0)) {
- bld.mkOp2(OP_ADD, TYPE_U64, ptr, ptr, i->getIndirect(0, 0));
- bld.mkOp2(OP_ADD, TYPE_U32, offset, offset, i->getIndirect(0, 0));
- }
- i->setIndirect(0, 1, NULL);
- i->setIndirect(0, 0, ptr);
- bld.mkCmp(OP_SET, CC_GT, TYPE_U32, pred, TYPE_U32, offset, length);
- i->setPredicate(CC_NOT_P, pred);
- if (i->defExists(0)) {
- bld.mkMov(i->getDef(0), bld.mkImm(0));
- }
- }
+ handleLDST(i);
break;
case OP_ATOM:
{
- const bool cctl = i->src(0).getFile() == FILE_MEMORY_GLOBAL;
+ const bool cctl = i->src(0).getFile() == FILE_MEMORY_BUFFER;
handleATOM(i);
handleCasExch(i, cctl);
}
case OP_SUSTP:
case OP_SUREDB:
case OP_SUREDP:
- if (targ->getChipset() >= NVISA_GK104_CHIPSET)
+ if (targ->getChipset() >= NVISA_GM107_CHIPSET)
+ handleSurfaceOpGM107(i->asTex());
+ else if (targ->getChipset() >= NVISA_GK104_CHIPSET)
handleSurfaceOpNVE4(i->asTex());
+ else
+ handleSurfaceOpNVC0(i->asTex());
break;
case OP_SUQ:
- handleSUQ(i);
+ handleSUQ(i->asTex());
+ break;
+ case OP_BUFQ:
+ handleBUFQ(i);
break;
default:
break;
/* Kepler+ has a special opcode to compute a new base address to be used
* for indirect loads.
+ *
+ * Maxwell+ has an additional similar requirement for indirect
+ * interpolation ops in frag shaders.
*/
- if (targ->getChipset() >= NVISA_GK104_CHIPSET && !i->perPatch &&
- (i->op == OP_VFETCH || i->op == OP_EXPORT) && i->src(0).isIndirect(0)) {
+ bool doAfetch = false;
+ if (targ->getChipset() >= NVISA_GK104_CHIPSET &&
+ !i->perPatch &&
+ (i->op == OP_VFETCH || i->op == OP_EXPORT) &&
+ i->src(0).isIndirect(0)) {
+ doAfetch = true;
+ }
+ if (targ->getChipset() >= NVISA_GM107_CHIPSET &&
+ (i->op == OP_LINTERP || i->op == OP_PINTERP) &&
+ i->src(0).isIndirect(0)) {
+ doAfetch = true;
+ }
+
+ if (doAfetch) {
+ Value *addr = cloneShallow(func, i->getSrc(0));
Instruction *afetch = bld.mkOp1(OP_AFETCH, TYPE_U32, bld.getSSA(),
- cloneShallow(func, i->getSrc(0)));
+ i->getSrc(0));
afetch->setIndirect(0, 0, i->getIndirect(0, 0));
- i->src(0).get()->reg.data.offset = 0;
+ addr->reg.data.offset = 0;
+ i->setSrc(0, addr);
i->setIndirect(0, 0, afetch->getDef(0));
}