void expr(Instruction *, ImmediateValue&, ImmediateValue&);
void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
- void opnd(Instruction *, ImmediateValue&, int s);
+ /* returns true if i was deleted; the caller must not use i afterwards */
+ bool opnd(Instruction *i, ImmediateValue&, int s);
void opnd3(Instruction *, ImmediateValue&);
void unary(Instruction *, const ImmediateValue&);
if (i->srcExists(2) &&
i->src(0).getImmediate(src0) &&
i->src(1).getImmediate(src1) &&
- i->src(2).getImmediate(src2))
+ i->src(2).getImmediate(src2)) {
expr(i, src0, src1, src2);
- else
+ } else
if (i->srcExists(1) &&
- i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
+ i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1)) {
expr(i, src0, src1);
- else
- if (i->srcExists(0) && i->src(0).getImmediate(src0))
- opnd(i, src0, 0);
- else
- if (i->srcExists(1) && i->src(1).getImmediate(src1))
- opnd(i, src1, 1);
+ } else
+ if (i->srcExists(0) && i->src(0).getImmediate(src0)) {
+ if (opnd(i, src0, 0))
+ continue;
+ } else
+ if (i->srcExists(1) && i->src(1).getImmediate(src1)) {
+ if (opnd(i, src1, 1))
+ continue;
+ }
if (i->srcExists(2) && i->src(2).getImmediate(src2))
opnd3(i, src2);
}
return false;
}
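// Rationale: delete_Instruction() frees i, so when opnd() reports a deletion
// the loop above continues immediately instead of reaching the trailing
// srcExists(2)/opnd3() check on a freed Instruction.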
-void
+bool
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
const int t = !s;
const operation op = i->op;
Instruction *newi = i;
+ bool deleted = false;
switch (i->op) {
case OP_SPLIT: {
val >>= bitsize;
}
delete_Instruction(prog, i);
+ deleted = true;
break;
}
case OP_MUL:
newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
TYPE_S32, i->getSrc(t), bld.mkImm(0));
delete_Instruction(prog, i);
+ deleted = true;
} else if (imm0.isInteger(0) || imm0.isInteger(1)) {
// The high bits can't be set in this case (either mul by 0 or
// unsigned by 1)
if (!isFloatType(i->dType) && !i->src(t).mod) {
bld.setPosition(i, false);
int64_t b = typeSizeof(i->dType) == 8 ? imm0.reg.data.s64 : imm0.reg.data.s32;
- if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, NULL))
+ if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, NULL)) {
delete_Instruction(prog, i);
+ deleted = true;
+ }
} else
if (i->postFactor && i->sType == TYPE_F32) {
/* Can't emit a postfactor with an immediate, have to fold it in */
if (!isFloatType(i->dType) && !i->subOp && !i->src(t).mod && !i->src(2).mod) {
bld.setPosition(i, false);
int64_t b = typeSizeof(i->dType) == 8 ? imm0.reg.data.s64 : imm0.reg.data.s32;
- if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, i->getSrc(2)))
+ if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, i->getSrc(2))) {
delete_Instruction(prog, i);
+ deleted = true;
+ }
}
break;
case OP_SUB:
bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));
delete_Instruction(prog, i);
+ deleted = true;
} else
if (imm0.reg.data.s32 == -1) {
i->op = OP_NEG;
bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tB);
delete_Instruction(prog, i);
+ deleted = true;
}
break;
newi = bld.mkOp2(OP_UNION, TYPE_S32, i->getDef(0), v1, v2);
delete_Instruction(prog, i);
+ deleted = true;
}
} else if (s == 1) {
// In this case, we still want the optimized lowering that we get
newi->src(1).mod = Modifier(NV50_IR_MOD_NEG);
delete_Instruction(prog, i);
+ deleted = true;
}
break;
CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
CondCode cc, ccZ;
if (imm0.reg.data.u32 != 0 || !si)
- return;
+ return false;
cc = si->setCond;
ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
// We do everything assuming var (cmp) 0, reverse the condition if 0 is
case CC_GT: break; // bool > 0 -- bool
case CC_NE: break; // bool != 0 -- bool
default:
- return;
+ return false;
}
// Update the condition of this SET to be identical to the origin set,
} else if (src->asCmp()) {
CmpInstruction *cmp = src->asCmp();
if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
- return;
+ return false;
if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
- return;
+ return false;
if (imm0.reg.data.f32 != 1.0)
- return;
+ return false;
if (cmp->dType != TYPE_U32)
- return;
+ return false;
cmp->dType = TYPE_F32;
if (i->src(t).mod != Modifier(0)) {
case OP_MUL:
int muls;
if (isFloatType(si->dType))
- return;
+ return false;
if (si->src(1).getImmediate(imm1))
muls = 1;
else if (si->src(0).getImmediate(imm1))
muls = 0;
else
- return;
+ return false;
bld.setPosition(i, false);
i->op = OP_MUL;
case OP_ADD:
int adds;
if (isFloatType(si->dType))
- return;
+ return false;
if (si->op != OP_SUB && si->src(0).getImmediate(imm1))
adds = 0;
else if (si->src(1).getImmediate(imm1))
adds = 1;
else
- return;
+ return false;
if (si->src(!adds).mod != Modifier(0))
- return;
+ return false;
// SHL(ADD(x, y), z) = ADD(SHL(x, z), SHL(y, z))
// This is more operations, but if one of x, y is an immediate, then
bld.mkImm(imm0.reg.data.u32)));
break;
default:
- return;
+ return false;
}
}
break;
case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
default:
- return;
+ return false;
}
if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
res = 31 - res;
// TODO: handle 64-bit values properly
if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
- return;
+ return false;
// TODO: handle single byte/word extractions
if (i->subOp)
- return;
+ return false;
bld.setPosition(i, true); /* make sure bld is init'ed */
CLAMP(imm0.reg.data.u16, umin, umax) : \
imm0.reg.data.u16; \
break; \
- default: return; \
+ default: return false; \
} \
i->setSrc(0, bld.mkImm(res.data.dst)); \
break
case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
default:
- return;
+ return false;
}
i->setSrc(0, bld.mkImm(res.data.f32));
break;
case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
default:
- return;
+ return false;
}
i->setSrc(0, bld.mkImm(res.data.f64));
break;
default:
- return;
+ return false;
}
#undef CASE
break;
}
default:
- return;
+ return false;
}
// This can get left behind some of the optimizations which simplify
if (newi->op != op)
foldCount++;
+ return deleted;
}
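// A minimal sketch of the contract introduced above, with hypothetical names
// (Insn, foldOperand, visitInsn are illustrations, not nv50_ir API): a helper
// that may free the instruction it is given returns whether it did, so the
// caller never touches the pointer again.
//
//    struct Insn { int op; int nSrcs; };
//
//    /* returns true if insn was freed */
//    static bool foldOperand(Insn *insn)
//    {
//       if (insn->op == 0) {      // the fold removes the instruction entirely
//          delete insn;           // insn is dead from here on
//          return true;           // report the deletion
//       }
//       return false;             // insn is still valid
//    }
//
//    static void visitInsn(Insn *insn)
//    {
//       if (foldOperand(insn))
//          return;                // skip everything that would read *insn
//       if (insn->nSrcs > 2) {    // safe: foldOperand() kept insn alive
//          /* further folding on the surviving instruction */
//       }
//    }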
// =============================================================================