field bits<16> AMDILOp = 0;
field bits<3> Gen = 0;
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
let OutOperandList = outs;
let InOperandList = ins;
let AsmString = asm;
//
//===----------------------------------------------------------------------===//
-let Namespace = "AMDIL" in {
+let Namespace = "AMDGPU" in {
def sel_x : SubRegIndex;
def sel_y : SubRegIndex;
def sel_z : SubRegIndex;
{
switch (opcode) {
default: return false;
- case AMDIL::RETURN:
- case AMDIL::LOAD_INPUT:
- case AMDIL::LAST:
- case AMDIL::MASK_WRITE:
- case AMDIL::RESERVE_REG:
+ case AMDGPU::RETURN:
+ case AMDGPU::LOAD_INPUT:
+ case AMDGPU::LAST:
+ case AMDGPU::MASK_WRITE:
+ case AMDGPU::RESERVE_REG:
return true;
}
}
switch(opcode) {
default: return false;
- case AMDIL::COS_r600:
- case AMDIL::COS_eg:
- case AMDIL::MULLIT:
- case AMDIL::MUL_LIT_r600:
- case AMDIL::MUL_LIT_eg:
- case AMDIL::EXP_IEEE_r600:
- case AMDIL::EXP_IEEE_eg:
- case AMDIL::LOG_CLAMPED_r600:
- case AMDIL::LOG_IEEE_r600:
- case AMDIL::LOG_CLAMPED_eg:
- case AMDIL::LOG_IEEE_eg:
+ case AMDGPU::COS_r600:
+ case AMDGPU::COS_eg:
+ case AMDGPU::MULLIT:
+ case AMDGPU::MUL_LIT_r600:
+ case AMDGPU::MUL_LIT_eg:
+ case AMDGPU::EXP_IEEE_r600:
+ case AMDGPU::EXP_IEEE_eg:
+ case AMDGPU::LOG_CLAMPED_r600:
+ case AMDGPU::LOG_IEEE_r600:
+ case AMDGPU::LOG_CLAMPED_eg:
+ case AMDGPU::LOG_IEEE_eg:
return true;
}
}
{
switch(opcode) {
default: return false;
- case AMDIL::TEX_LD:
- case AMDIL::TEX_GET_TEXTURE_RESINFO:
- case AMDIL::TEX_SAMPLE:
- case AMDIL::TEX_SAMPLE_C:
- case AMDIL::TEX_SAMPLE_L:
- case AMDIL::TEX_SAMPLE_C_L:
- case AMDIL::TEX_SAMPLE_LB:
- case AMDIL::TEX_SAMPLE_C_LB:
- case AMDIL::TEX_SAMPLE_G:
- case AMDIL::TEX_SAMPLE_C_G:
- case AMDIL::TEX_GET_GRADIENTS_H:
- case AMDIL::TEX_GET_GRADIENTS_V:
- case AMDIL::TEX_SET_GRADIENTS_H:
- case AMDIL::TEX_SET_GRADIENTS_V:
+ case AMDGPU::TEX_LD:
+ case AMDGPU::TEX_GET_TEXTURE_RESINFO:
+ case AMDGPU::TEX_SAMPLE:
+ case AMDGPU::TEX_SAMPLE_C:
+ case AMDGPU::TEX_SAMPLE_L:
+ case AMDGPU::TEX_SAMPLE_C_L:
+ case AMDGPU::TEX_SAMPLE_LB:
+ case AMDGPU::TEX_SAMPLE_C_LB:
+ case AMDGPU::TEX_SAMPLE_G:
+ case AMDGPU::TEX_SAMPLE_C_G:
+ case AMDGPU::TEX_GET_GRADIENTS_H:
+ case AMDGPU::TEX_GET_GRADIENTS_V:
+ case AMDGPU::TEX_SET_GRADIENTS_H:
+ case AMDGPU::TEX_SET_GRADIENTS_V:
return true;
}
}
{
switch(opcode) {
default: return false;
- case AMDIL::DOT4_r600:
- case AMDIL::DOT4_eg:
+ case AMDGPU::DOT4_r600:
+ case AMDGPU::DOT4_eg:
return true;
}
}
{
switch(opcode) {
default: return false;
- case AMDIL::CUBE_r600:
- case AMDIL::CUBE_eg:
+ case AMDGPU::CUBE_r600:
+ case AMDGPU::CUBE_eg:
return true;
}
}
{
switch(opcode) {
default: return false;
- case AMDIL::BREAK_LOGICALZ_f32:
- case AMDIL::BREAK_LOGICALNZ_i32:
- case AMDIL::BREAK_LOGICALZ_i32:
- case AMDIL::BREAK_LOGICALNZ_f32:
- case AMDIL::CONTINUE_LOGICALNZ_f32:
- case AMDIL::IF_LOGICALNZ_i32:
- case AMDIL::IF_LOGICALZ_f32:
- case AMDIL::ELSE:
- case AMDIL::ENDIF:
- case AMDIL::ENDLOOP:
- case AMDIL::IF_LOGICALNZ_f32:
- case AMDIL::WHILELOOP:
+ case AMDGPU::BREAK_LOGICALZ_f32:
+ case AMDGPU::BREAK_LOGICALNZ_i32:
+ case AMDGPU::BREAK_LOGICALZ_i32:
+ case AMDGPU::BREAK_LOGICALNZ_f32:
+ case AMDGPU::CONTINUE_LOGICALNZ_f32:
+ case AMDGPU::IF_LOGICALNZ_i32:
+ case AMDGPU::IF_LOGICALZ_f32:
+ case AMDGPU::ELSE:
+ case AMDGPU::ENDIF:
+ case AMDGPU::ENDLOOP:
+ case AMDGPU::IF_LOGICALNZ_f32:
+ case AMDGPU::WHILELOOP:
return true;
}
}
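// utilAddLiveIn (intent, inferred from the body below and its callers): if
// physReg is not yet a live-in of the function, register it and copy it into
// virtReg; otherwise presumably reuse the virtual register already bound to it.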
-void AMDGPU::utilAddLiveIn(llvm::MachineFunction * MF,
- llvm::MachineRegisterInfo & MRI,
- const llvm::TargetInstrInfo * TII,
+void AMDGPU::utilAddLiveIn(MachineFunction * MF,
+ MachineRegisterInfo & MRI,
+ const TargetInstrInfo * TII,
unsigned physReg, unsigned virtReg)
{
if (!MRI.isLiveIn(physReg)) {
class MachineRegisterInfo;
class TargetInstrInfo;
-}
-
namespace AMDGPU {
bool isPlaceHolderOpcode(unsigned opcode);
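// MachineOperand target flags consumed by the R600 lowering and code emitter;
// MO_FLAG_NEG and MO_FLAG_CLAMP, used further below, are presumably defined
// alongside these two.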
#define MO_FLAG_ABS (1 << 2)
#define MO_FLAG_MASK (1 << 3)
-void utilAddLiveIn(llvm::MachineFunction * MF, llvm::MachineRegisterInfo & MRI,
- const llvm::TargetInstrInfo * TII, unsigned physReg, unsigned virtReg);
+void utilAddLiveIn(MachineFunction * MF, MachineRegisterInfo & MRI,
+ const TargetInstrInfo * TII, unsigned physReg, unsigned virtReg);
} // End namespace AMDGPU
+} // End namespace llvm
+
#endif // AMDGPU_UTIL_H
bool landBlkHasOtherPred = (landBlk->pred_size() > 2);
- //insert AMDIL::ENDIF to avoid special case "input landBlk == NULL"
+ //insert AMDGPU::ENDIF to avoid special case "input landBlk == NULL"
typename BlockT::iterator insertPos =
CFGTraits::getInstrPos
- (landBlk, CFGTraits::insertInstrBefore(landBlk, AMDIL::ENDIF, passRep));
+ (landBlk, CFGTraits::insertInstrBefore(landBlk, AMDGPU::ENDIF, passRep));
if (landBlkHasOtherPred) {
unsigned immReg =
CFGTraits::insertCompareInstrBefore(landBlk, insertPos, passRep, cmpResReg,
initReg, immReg);
CFGTraits::insertCondBranchBefore(landBlk, insertPos,
- AMDIL::IF_LOGICALZ_i32, passRep,
+ AMDGPU::IF_LOGICALZ_i32, passRep,
cmpResReg, DebugLoc());
}
- CFGTraits::insertCondBranchBefore(landBlk, insertPos, AMDIL::IF_LOGICALNZ_i32,
+ CFGTraits::insertCondBranchBefore(landBlk, insertPos, AMDGPU::IF_LOGICALNZ_i32,
passRep, initReg, DebugLoc());
if (migrateTrue) {
// (initVal != 1).
CFGTraits::insertAssignInstrBefore(trueBlk, passRep, initReg, 1);
}
- CFGTraits::insertInstrBefore(insertPos, AMDIL::ELSE, passRep);
+ CFGTraits::insertInstrBefore(insertPos, AMDGPU::ELSE, passRep);
if (migrateFalse) {
migrateInstruction(falseBlk, landBlk, insertPos);
// (initVal != 0)
CFGTraits::insertAssignInstrBefore(falseBlk, passRep, initReg, 0);
}
- //CFGTraits::insertInstrBefore(insertPos, AMDIL::ENDIF, passRep);
+ //CFGTraits::insertInstrBefore(insertPos, AMDGPU::ENDIF, passRep);
if (landBlkHasOtherPred) {
// add endif
- CFGTraits::insertInstrBefore(insertPos, AMDIL::ENDIF, passRep);
+ CFGTraits::insertInstrBefore(insertPos, AMDGPU::ENDIF, passRep);
// set initReg = 2 in the other predecessors of landBlk
for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(),
}
retireBlock(curBlk, trueBlk);
}
- CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ELSE, passRep);
+ CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ELSE, passRep);
if (falseBlk) {
curBlk->splice(branchInstrPos, falseBlk, FirstNonDebugInstr(falseBlk),
}
retireBlock(curBlk, falseBlk);
}
- CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ENDIF, passRep);
+ CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ENDIF, passRep);
//curBlk->remove(branchInstrPos);
branchInstr->eraseFromParent();
}
/* we last inserted the DebugLoc in the
- * BREAK_LOGICALZ_i32 or AMDIL::BREAK_LOGICALNZ statement in the current dstBlk.
+ * BREAK_LOGICALZ_i32 or AMDGPU::BREAK_LOGICALNZ statement in the current dstBlk.
* search for the DebugLoc in that statement.
* if not found, we have to insert the empty/default DebugLoc */
InstrT *loopBreakInstr = CFGTraits::getLoopBreakInstr(dstBlk);
DebugLoc DLBreak = (loopBreakInstr) ? loopBreakInstr->getDebugLoc() : DebugLoc();
- CFGTraits::insertInstrBefore(dstBlk, AMDIL::WHILELOOP, passRep, DLBreak);
+ CFGTraits::insertInstrBefore(dstBlk, AMDGPU::WHILELOOP, passRep, DLBreak);
// Loop breakInitRegs are initialized before entering the loop.
for (typename std::set<RegiT>::const_iterator iter =
loopLand->breakInitRegs.begin(),
InstrT *continueInstr = CFGTraits::getContinueInstr(dstBlk);
DebugLoc DLContinue = (continueInstr) ? continueInstr->getDebugLoc() : DebugLoc();
- CFGTraits::insertInstrEnd(dstBlk, AMDIL::ENDLOOP, passRep, DLContinue);
+ CFGTraits::insertInstrEnd(dstBlk, AMDGPU::ENDLOOP, passRep, DLContinue);
// Loop breakOnRegs are checked after the ENDLOOP: break the loop outside this
// loop.
for (typename std::set<RegiT>::const_iterator iter =
loopLand->breakOnRegs.begin(),
iterEnd = loopLand->breakOnRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertCondBranchEnd(dstBlk, AMDIL::BREAK_LOGICALNZ_i32, passRep,
+ CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::BREAK_LOGICALNZ_i32, passRep,
*iter);
}
// loop.
for (std::set<RegiT>::const_iterator iter = loopLand->contOnRegs.begin(),
iterEnd = loopLand->contOnRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertCondBranchEnd(dstBlk, AMDIL::CONTINUE_LOGICALNZ_i32,
+ CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::CONTINUE_LOGICALNZ_i32,
passRep, *iter);
}
if (setReg != INVALIDREGNUM) {
CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1);
}
- CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::BREAK, passRep);
- CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ENDIF, passRep);
+ CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::BREAK, passRep);
+ CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ENDIF, passRep);
} //if_logical
// now branchInstr can be erased safely
if (setReg != INVALIDREGNUM) {
CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1);
// insertEnd to ensure phi-moves, if any, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDIL::BREAK, passRep, DL);
+ CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, DL);
} else {
// insertEnd to ensure phi-moves, if any, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDIL::CONTINUE, passRep, DL);
+ CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, DL);
}
- CFGTraits::insertInstrEnd(contingBlk, AMDIL::ENDIF, passRep, DL);
+ CFGTraits::insertInstrEnd(contingBlk, AMDGPU::ENDIF, passRep, DL);
} else {
int branchOpcode =
trueBranch == contBlk ? CFGTraits::getContinueNzeroOpcode(oldOpcode)
if (setReg != INVALIDREGNUM) {
CFGTraits::insertAssignInstrBefore(contingBlk, passRep, setReg, 1);
// insertEnd to ensure phi-moves, if any, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDIL::BREAK, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
+ CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
} else {
// insertEnd to ensure phi-moves, if any, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDIL::CONTINUE, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
+ CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
}
} //else
BlockT *newBlk = funcRep->CreateMachineBasicBlock();
funcRep->push_back(newBlk); //insert to function
- CFGTraits::insertInstrEnd(newBlk, AMDIL::CONTINUE, passRep);
+ CFGTraits::insertInstrEnd(newBlk, AMDGPU::CONTINUE, passRep);
SHOWNEWBLK(newBlk, "New continue block: ");
for (typename std::set<BlockT*>::const_iterator iter = endBlkSet.begin(),
BuildMI(preBranchBlk, DL, tii->get(tii->getIEQOpcode()), condResReg)
.addReg(endBranchReg).addReg(preValReg);
- BuildMI(preBranchBlk, DL, tii->get(AMDIL::BRANCH_COND_i32))
+ BuildMI(preBranchBlk, DL, tii->get(AMDGPU::BRANCH_COND_i32))
.addMBB(preExitBlk).addReg(condResReg);
preBranchBlk->addSuccessor(preExitBlk);
funcRep->getRegInfo().createVirtualRegister(I32RC);
CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 1);
InstrT *newInstr =
- CFGTraits::insertInstrBefore(insertPos, AMDIL::BRANCH_COND_i32, passRep);
+ CFGTraits::insertInstrBefore(insertPos, AMDGPU::BRANCH_COND_i32, passRep);
MachineInstrBuilder(newInstr).addMBB(loopHeader).addReg(immReg, false);
SHOWNEWINSTR(newInstr);
DEFAULT_VEC_SLOTS> &retBlks) {
BlockT *dummyExitBlk = funcRep->CreateMachineBasicBlock();
funcRep->push_back(dummyExitBlk); //insert to function
- CFGTraits::insertInstrEnd(dummyExitBlk, AMDIL::RETURN, passRep);
+ CFGTraits::insertInstrEnd(dummyExitBlk, AMDGPU::RETURN, passRep);
for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator iter =
retBlks.begin(),
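// The ExpandCaseToAllScalarReturn(OPC, RET) macro used below presumably expands
// to one "case OPC##_<type>: return RET##_<type>;" per scalar type (i32, f32),
// mapping each conditional branch onto its structured control-flow counterpart.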
static int getBreakNzeroOpcode(int oldOpcode) {
switch(oldOpcode) {
- ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::BREAK_LOGICALNZ);
+ ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::BREAK_LOGICALNZ);
default:
assert(0 && "internal error");
};
static int getBreakZeroOpcode(int oldOpcode) {
switch(oldOpcode) {
- ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::BREAK_LOGICALZ);
+ ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::BREAK_LOGICALZ);
default:
assert(0 && "internal error");
};
static int getBranchNzeroOpcode(int oldOpcode) {
switch(oldOpcode) {
- ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::IF_LOGICALNZ);
+ ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::IF_LOGICALNZ);
default:
assert(0 && "internal error");
};
static int getBranchZeroOpcode(int oldOpcode) {
switch(oldOpcode) {
- ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::IF_LOGICALZ);
+ ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::IF_LOGICALZ);
default:
assert(0 && "internal error");
};
static int getContinueNzeroOpcode(int oldOpcode)
{
switch(oldOpcode) {
- ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::CONTINUE_LOGICALNZ);
+ ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::CONTINUE_LOGICALNZ);
default:
assert(0 && "internal error");
};
static int getContinueZeroOpcode(int oldOpcode) {
switch(oldOpcode) {
- ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::CONTINUE_LOGICALZ);
+ ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::CONTINUE_LOGICALZ);
default:
assert(0 && "internal error");
};
static bool isCondBranch(MachineInstr *instr) {
switch (instr->getOpcode()) {
- ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+ ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
break;
default:
return false;
static bool isUncondBranch(MachineInstr *instr) {
switch (instr->getOpcode()) {
- case AMDIL::BRANCH:
+ case AMDGPU::BRANCH:
break;
default:
return false;
MachineBasicBlock::reverse_iterator iter = blk->rbegin();
if (iter != blk->rend()) {
MachineInstr *instr = &(*iter);
- if (instr->getOpcode() == AMDIL::RETURN) {
+ if (instr->getOpcode() == AMDGPU::RETURN) {
return instr;
}
}
MachineBasicBlock::reverse_iterator iter = blk->rbegin();
if (iter != blk->rend()) {
MachineInstr *instr = &(*iter);
- if (instr->getOpcode() == AMDIL::CONTINUE) {
+ if (instr->getOpcode() == AMDGPU::CONTINUE) {
return instr;
}
}
static MachineInstr *getLoopBreakInstr(MachineBasicBlock *blk) {
for (MachineBasicBlock::iterator iter = blk->begin(); (iter != blk->end()); ++iter) {
MachineInstr *instr = &(*iter);
- if ((instr->getOpcode() == AMDIL::BREAK_LOGICALNZ_i32) || (instr->getOpcode() == AMDIL::BREAK_LOGICALZ_i32)) {
+ if ((instr->getOpcode() == AMDGPU::BREAK_LOGICALNZ_i32) || (instr->getOpcode() == AMDGPU::BREAK_LOGICALZ_i32)) {
return instr;
}
}
MachineBasicBlock::iterator iterEnd = entryBlk->end();
MachineBasicBlock::iterator iter = pre;
while (iter != iterEnd) {
- if (pre->getOpcode() == AMDIL::CONTINUE
- && iter->getOpcode() == AMDIL::ENDLOOP) {
+ if (pre->getOpcode() == AMDGPU::CONTINUE
+ && iter->getOpcode() == AMDGPU::ENDLOOP) {
contInstr.push_back(pre);
}
pre = iter;
class ILFormat<ILOpCode op, dag outs, dag ins, string asmstr, list<dag> pattern>
: Instruction {
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
dag OutOperandList = outs;
dag InOperandList = ins;
ILOpCode operation = op;
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) {
unsigned int FI = FIN->getIndex();
EVT OpVT = N->getValueType(0);
- unsigned int NewOpc = AMDIL::COPY;
+ unsigned int NewOpc = AMDGPU::COPY;
SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32);
return CurDAG->SelectNodeTo(N, NewOpc, OpVT, TFI);
}
&& isInt<16>(IMMOffset->getZExtValue())) {
Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
CurDAG->getEntryNode().getDebugLoc(),
- AMDIL::ZERO, MVT::i32);
+ AMDGPU::ZERO, MVT::i32);
Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
return true;
}
setOperationAction(ISD::Constant , MVT::i32 , Legal);
setOperationAction(ISD::TRAP , MVT::Other , Legal);
- setStackPointerRegisterToSaveRestore(AMDIL::SP);
+ setStackPointerRegisterToSaveRestore(AMDGPU::SP);
setSchedulingPreference(Sched::RegPressure);
setPow2DivIsCheap(false);
setPrefLoopAlignment(16);
{
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
- unsigned int SPReg = AMDIL::SP;
+ unsigned int SPReg = AMDGPU::SP;
DebugLoc DL = Op.getDebugLoc();
SDValue SP = DAG.getCopyFromReg(Chain,
DL,
switch (iter->getOpcode()) {
default:
break;
- ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
- case AMDIL::BRANCH:
+ ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
+ case AMDGPU::BRANCH:
return true;
};
++iter;
} else {
MachineInstr *firstBranch = iter;
if (!getNextBranchInstr(++iter, MBB)) {
- if (firstBranch->getOpcode() == AMDIL::BRANCH) {
+ if (firstBranch->getOpcode() == AMDGPU::BRANCH) {
TBB = firstBranch->getOperand(0).getMBB();
firstBranch->eraseFromParent();
retVal = false;
} else {
MachineInstr *secondBranch = iter;
if (!getNextBranchInstr(++iter, MBB)) {
- if (secondBranch->getOpcode() == AMDIL::BRANCH) {
+ if (secondBranch->getOpcode() == AMDGPU::BRANCH) {
TBB = firstBranch->getOperand(0).getMBB();
Cond.push_back(firstBranch->getOperand(1));
FBB = secondBranch->getOperand(0).getMBB();
switch (MI->getDesc().OpInfo->RegClass) {
default: // FIXME: fallthrough??
- case AMDIL::GPRI32RegClassID: return AMDIL::BRANCH_COND_i32;
- case AMDIL::GPRF32RegClassID: return AMDIL::BRANCH_COND_f32;
+ case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
+ case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
};
}
}
if (FBB == 0) {
if (Cond.empty()) {
- BuildMI(&MBB, DL, get(AMDIL::BRANCH)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(AMDGPU::BRANCH)).addMBB(TBB);
} else {
BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
.addMBB(TBB).addReg(Cond[0].getReg());
} else {
BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
.addMBB(TBB).addReg(Cond[0].getReg());
- BuildMI(&MBB, DL, get(AMDIL::BRANCH)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(AMDGPU::BRANCH)).addMBB(FBB);
}
assert(0 && "Inserting two branches not supported");
return 0;
switch (I->getOpcode()) {
default:
return 0;
- ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
- case AMDIL::BRANCH:
+ ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
+ case AMDGPU::BRANCH:
I->eraseFromParent();
break;
}
// FIXME: only one case??
default:
return 1;
- ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+ ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
I->eraseFromParent();
break;
}
return MBB->end();
}
while (--tmp) {
- if (tmp->getOpcode() == AMDIL::ENDLOOP
- || tmp->getOpcode() == AMDIL::ENDIF
- || tmp->getOpcode() == AMDIL::ELSE) {
+ if (tmp->getOpcode() == AMDGPU::ENDLOOP
+ || tmp->getOpcode() == AMDGPU::ENDIF
+ || tmp->getOpcode() == AMDGPU::ELSE) {
if (tmp == MBB->begin()) {
return tmp;
} else {
DebugLoc DL;
switch (RC->getID()) {
- case AMDIL::GPRF32RegClassID:
- Opc = AMDIL::PRIVATESTORE_f32;
+ case AMDGPU::GPRF32RegClassID:
+ Opc = AMDGPU::PRIVATESTORE_f32;
break;
- case AMDIL::GPRI32RegClassID:
- Opc = AMDIL::PRIVATESTORE_i32;
+ case AMDGPU::GPRI32RegClassID:
+ Opc = AMDGPU::PRIVATESTORE_i32;
break;
}
if (MI != MBB.end()) DL = MI->getDebugLoc();
MachineFrameInfo &MFI = *MF.getFrameInfo();
DebugLoc DL;
switch (RC->getID()) {
- case AMDIL::GPRF32RegClassID:
- Opc = AMDIL::PRIVATELOAD_f32;
+ case AMDGPU::GPRF32RegClassID:
+ Opc = AMDGPU::PRIVATELOAD_f32;
break;
- case AMDIL::GPRI32RegClassID:
- Opc = AMDIL::PRIVATELOAD_i32;
+ case AMDGPU::GPRI32RegClassID:
+ Opc = AMDGPU::PRIVATELOAD_i32;
break;
}
unsigned int
AMDILRegisterInfo::getRARegister() const
{
- return AMDIL::RA;
+ return AMDGPU::RA;
}
unsigned int
AMDILRegisterInfo::getFrameRegister(const MachineFunction &MF) const
{
- return AMDIL::FP;
+ return AMDGPU::FP;
}
unsigned int
virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT)
const {
- return AMDIL::GPRI32RegisterClass;
+ return AMDGPU::GPRI32RegisterClass;
}
private:
mutable int64_t baseOffset;
class AMDILReg<bits<16> num, string n> : Register<n> {
field bits<16> Value;
let Value = num;
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
}
// We will start with 8 registers for each class before expanding to more
def MEM : AMDILReg<999, "mem">, DwarfRegNum<[999]>;
def RA : AMDILReg<998, "r998">, DwarfRegNum<[998]>;
def FP : AMDILReg<997, "r997">, DwarfRegNum<[997]>;
-def GPRI16 : RegisterClass<"AMDIL", [i16], 16,
+def GPRI16 : RegisterClass<"AMDGPU", [i16], 16,
(add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
{
let AltOrders = [(add (sequence "R%u", 1, 20))];
return 1;
}];
}
-def GPRI32 : RegisterClass<"AMDIL", [i32], 32,
+def GPRI32 : RegisterClass<"AMDGPU", [i32], 32,
(add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
{
let AltOrders = [(add (sequence "R%u", 1, 20))];
return 1;
}];
}
-def GPRF32 : RegisterClass<"AMDIL", [f32], 32,
+def GPRF32 : RegisterClass<"AMDGPU", [f32], 32,
(add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
{
let AltOrders = [(add (sequence "R%u", 1, 20))];
// These macros expand to common groupings of RegClass IDs
#define ExpandCaseTo1CompRegID \
-case AMDIL::GPRI32RegClassID: \
-case AMDIL::GPRF32RegClassID:
+case AMDGPU::GPRI32RegClassID: \
+case AMDGPU::GPRF32RegClassID:
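// Usage sketch (assumption): the macro supplies the case labels of a switch
// over register-class IDs, e.g.
//   switch (RC->getID()) {
//   ExpandCaseTo1CompRegID
//     return true;
//   default:
//     return false;
//   }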
#define ExpandCaseTo32BitType(Instr) \
case Instr##_i32: \
isReduction = false;
isVector = false;
isCube = false;
- } else if (MI.getOpcode() == AMDIL::RETURN ||
- MI.getOpcode() == AMDIL::BUNDLE ||
- MI.getOpcode() == AMDIL::KILL) {
+ } else if (MI.getOpcode() == AMDGPU::RETURN ||
+ MI.getOpcode() == AMDGPU::BUNDLE ||
+ MI.getOpcode() == AMDGPU::KILL) {
continue;
} else {
switch(MI.getOpcode()) {
- case AMDIL::RAT_WRITE_CACHELESS_eg:
+ case AMDGPU::RAT_WRITE_CACHELESS_eg:
{
uint64_t inst = getBinaryCodeForInstr(MI);
// Set End Of Program bit
// set in a prior pass.
MachineBasicBlock::iterator NextI = llvm::next(I);
MachineInstr &NextMI = *NextI;
- if (NextMI.getOpcode() == AMDIL::RETURN) {
+ if (NextMI.getOpcode() == AMDGPU::RETURN) {
inst |= (((uint64_t)1) << 53);
}
emitByte(INSTR_NATIVE);
emit(inst);
break;
}
- case AMDIL::VTX_READ_PARAM_eg:
- case AMDIL::VTX_READ_GLOBAL_eg:
- case AMDIL::VTX_READ_GLOBAL_128_eg:
+ case AMDGPU::VTX_READ_PARAM_eg:
+ case AMDGPU::VTX_READ_GLOBAL_eg:
+ case AMDGPU::VTX_READ_GLOBAL_128_eg:
{
uint64_t InstWord01 = getBinaryCodeForInstr(MI);
uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
if (MO.isReg()) {
unsigned reg = MO.getReg();
emitTwoBytes(getHWReg(reg));
- if (reg == AMDIL::ALU_LITERAL_X) {
+ if (reg == AMDGPU::ALU_LITERAL_X) {
const MachineInstr * parent = MO.getParent();
unsigned immOpIndex = parent->getNumExplicitOperands() - 1;
MachineOperand immOp = parent->getOperand(immOpIndex);
if ((!(MO.getTargetFlags() & MO_FLAG_ABS))
&& (MO.getTargetFlags() & MO_FLAG_NEG ||
(MO.isReg() &&
- (MO.getReg() == AMDIL::NEG_ONE || MO.getReg() == AMDIL::NEG_HALF)))){
+ (MO.getReg() == AMDGPU::NEG_ONE || MO.getReg() == AMDGPU::NEG_HALF)))){
emitByte(1);
} else {
emitByte(0);
{
unsigned opcode = MI.getOpcode();
- bool hasOffsets = (opcode == AMDIL::TEX_LD);
+ bool hasOffsets = (opcode == AMDGPU::TEX_LD);
unsigned op_offset = hasOffsets ? 3 : 0;
int64_t sampler = MI.getOperand(op_offset+2).getImm();
int64_t textureType = MI.getOperand(op_offset+3).getImm();
if (textureType == TEXTURE_1D_ARRAY
|| textureType == TEXTURE_SHADOW1D_ARRAY) {
- if (opcode == AMDIL::TEX_SAMPLE_C_L || opcode == AMDIL::TEX_SAMPLE_C_LB) {
+ if (opcode == AMDGPU::TEX_SAMPLE_C_L || opcode == AMDGPU::TEX_SAMPLE_C_LB) {
coordType[ELEMENT_Y] = 0;
} else {
coordType[ELEMENT_Z] = 0;
|| textureType == TEXTURE_SHADOW2D
|| textureType == TEXTURE_SHADOWRECT
|| textureType == TEXTURE_SHADOW1D_ARRAY)
- && opcode != AMDIL::TEX_SAMPLE_C_L
- && opcode != AMDIL::TEX_SAMPLE_C_LB) {
+ && opcode != AMDGPU::TEX_SAMPLE_C_L
+ && opcode != AMDGPU::TEX_SAMPLE_C_LB) {
srcSelect[ELEMENT_W] = ELEMENT_Z;
}
// Emit FC Instruction
enum FCInstr instr;
switch (MI.getOpcode()) {
- case AMDIL::BREAK_LOGICALZ_f32:
+ case AMDGPU::BREAK_LOGICALZ_f32:
instr = FC_BREAK;
break;
- case AMDIL::BREAK_LOGICALNZ_f32:
- case AMDIL::BREAK_LOGICALNZ_i32:
+ case AMDGPU::BREAK_LOGICALNZ_f32:
+ case AMDGPU::BREAK_LOGICALNZ_i32:
instr = FC_BREAK_NZ_INT;
break;
- case AMDIL::BREAK_LOGICALZ_i32:
+ case AMDGPU::BREAK_LOGICALZ_i32:
instr = FC_BREAK_Z_INT;
break;
- case AMDIL::CONTINUE_LOGICALNZ_f32:
- case AMDIL::CONTINUE_LOGICALNZ_i32:
+ case AMDGPU::CONTINUE_LOGICALNZ_f32:
+ case AMDGPU::CONTINUE_LOGICALNZ_i32:
instr = FC_CONTINUE;
break;
- case AMDIL::IF_LOGICALNZ_f32:
- case AMDIL::IF_LOGICALNZ_i32:
+ case AMDGPU::IF_LOGICALNZ_f32:
+ case AMDGPU::IF_LOGICALNZ_i32:
instr = FC_IF;
break;
- case AMDIL::IF_LOGICALZ_f32:
+ case AMDGPU::IF_LOGICALZ_f32:
abort();
break;
- case AMDIL::ELSE:
+ case AMDGPU::ELSE:
instr = FC_ELSE;
break;
- case AMDIL::ENDIF:
+ case AMDGPU::ENDIF:
instr = FC_ENDIF;
break;
- case AMDIL::ENDLOOP:
+ case AMDGPU::ENDLOOP:
instr = FC_ENDLOOP;
break;
- case AMDIL::WHILELOOP:
+ case AMDGPU::WHILELOOP:
instr = FC_BGNLOOP;
break;
default:
unsigned hwReg;
hwReg = TRI->getHWRegIndex(regNo);
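// Constant-file (R600_CReg32) registers live above the GPRs in the hardware
// encoding, hence the 512 offset (an assumption about the R600 ALU source encoding).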
- if (AMDIL::R600_CReg32RegClass.contains(regNo)) {
+ if (AMDGPU::R600_CReg32RegClass.contains(regNo)) {
hwReg += 512;
}
return hwReg;
print <<STRING;
class R600Reg <string name> : Register<name> {
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
}
class R600Reg_128<string n, list<Register> subregs> : RegisterWithSubRegs<n, subregs> {
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
let SubRegIndices = [sel_x, sel_y, sel_z, sel_w];
}
def PV_X : R600Reg<"pv.x">;
def ALU_LITERAL_X : R600Reg<"literal.x">;
-def R600_CReg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add
+def R600_CReg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add
$creg_list)>;
-def R600_TReg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add
+def R600_TReg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add
$treg_string)>;
-def R600_TReg32_X : RegisterClass <"AMDIL", [f32, i32], 32, (add
+def R600_TReg32_X : RegisterClass <"AMDGPU", [f32, i32], 32, (add
$treg_x_string)>;
-def R600_Reg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add
+def R600_Reg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add
R600_TReg32,
R600_CReg32,
ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF)>;
-def R600_Reg128 : RegisterClass<"AMDIL", [v4f32, v4i32], 128, (add
+def R600_Reg128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128, (add
$t128_string)>
{
let SubRegClasses = [(R600_TReg32 sel_x, sel_y, sel_z, sel_w)];
STRING
foreach my $key (keys(%index_map)) {
foreach my $reg (@{$index_map{$key}}) {
- print OUTFILE " case AMDIL::$reg:\n";
+ print OUTFILE " case AMDGPU::$reg:\n";
}
print OUTFILE " return $key;\n\n";
}
foreach my $key (keys(%chan_map)) {
foreach my $reg (@{$chan_map{$key}}) {
- print OUTFILE " case AMDIL::$reg:\n";
+ print OUTFILE " case AMDGPU::$reg:\n";
}
my $val;
if ($key eq 'X') {
TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo()))
{
setOperationAction(ISD::MUL, MVT::i64, Expand);
- addRegisterClass(MVT::v4f32, &AMDIL::R600_Reg128RegClass);
- addRegisterClass(MVT::f32, &AMDIL::R600_Reg32RegClass);
- addRegisterClass(MVT::v4i32, &AMDIL::R600_Reg128RegClass);
- addRegisterClass(MVT::i32, &AMDIL::R600_Reg32RegClass);
+ addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
+ addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
+ addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
+ addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass);
computeRegisterProperties();
setOperationAction(ISD::FSUB, MVT::f32, Expand);
switch (MI->getOpcode()) {
default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
- case AMDIL::TGID_X:
- addLiveIn(MI, MF, MRI, TII, AMDIL::T1_X);
+ case AMDGPU::TGID_X:
+ addLiveIn(MI, MF, MRI, TII, AMDGPU::T1_X);
break;
- case AMDIL::TGID_Y:
- addLiveIn(MI, MF, MRI, TII, AMDIL::T1_Y);
+ case AMDGPU::TGID_Y:
+ addLiveIn(MI, MF, MRI, TII, AMDGPU::T1_Y);
break;
- case AMDIL::TGID_Z:
- addLiveIn(MI, MF, MRI, TII, AMDIL::T1_Z);
+ case AMDGPU::TGID_Z:
+ addLiveIn(MI, MF, MRI, TII, AMDGPU::T1_Z);
break;
- case AMDIL::TIDIG_X:
- addLiveIn(MI, MF, MRI, TII, AMDIL::T0_X);
+ case AMDGPU::TIDIG_X:
+ addLiveIn(MI, MF, MRI, TII, AMDGPU::T0_X);
break;
- case AMDIL::TIDIG_Y:
- addLiveIn(MI, MF, MRI, TII, AMDIL::T0_Y);
+ case AMDGPU::TIDIG_Y:
+ addLiveIn(MI, MF, MRI, TII, AMDGPU::T0_Y);
break;
- case AMDIL::TIDIG_Z:
- addLiveIn(MI, MF, MRI, TII, AMDIL::T0_Z);
+ case AMDGPU::TIDIG_Z:
+ addLiveIn(MI, MF, MRI, TII, AMDGPU::T0_Z);
break;
- case AMDIL::NGROUPS_X:
+ case AMDGPU::NGROUPS_X:
lowerImplicitParameter(MI, *BB, MRI, 0);
break;
- case AMDIL::NGROUPS_Y:
+ case AMDGPU::NGROUPS_Y:
lowerImplicitParameter(MI, *BB, MRI, 1);
break;
- case AMDIL::NGROUPS_Z:
+ case AMDGPU::NGROUPS_Z:
lowerImplicitParameter(MI, *BB, MRI, 2);
break;
- case AMDIL::GLOBAL_SIZE_X:
+ case AMDGPU::GLOBAL_SIZE_X:
lowerImplicitParameter(MI, *BB, MRI, 3);
break;
- case AMDIL::GLOBAL_SIZE_Y:
+ case AMDGPU::GLOBAL_SIZE_Y:
lowerImplicitParameter(MI, *BB, MRI, 4);
break;
- case AMDIL::GLOBAL_SIZE_Z:
+ case AMDGPU::GLOBAL_SIZE_Z:
lowerImplicitParameter(MI, *BB, MRI, 5);
break;
- case AMDIL::LOCAL_SIZE_X:
+ case AMDGPU::LOCAL_SIZE_X:
lowerImplicitParameter(MI, *BB, MRI, 6);
break;
- case AMDIL::LOCAL_SIZE_Y:
+ case AMDGPU::LOCAL_SIZE_Y:
lowerImplicitParameter(MI, *BB, MRI, 7);
break;
- case AMDIL::LOCAL_SIZE_Z:
+ case AMDGPU::LOCAL_SIZE_Z:
lowerImplicitParameter(MI, *BB, MRI, 8);
break;
- case AMDIL::CLAMP_R600:
+ case AMDGPU::CLAMP_R600:
MI->getOperand(0).addTargetFlag(MO_FLAG_CLAMP);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV))
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::MOV))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1));
break;
- case AMDIL::FABS_R600:
+ case AMDGPU::FABS_R600:
MI->getOperand(1).addTargetFlag(MO_FLAG_ABS);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV))
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::MOV))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1));
break;
- case AMDIL::FNEG_R600:
+ case AMDGPU::FNEG_R600:
MI->getOperand(1).addTargetFlag(MO_FLAG_NEG);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV))
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::MOV))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1));
break;
- case AMDIL::R600_LOAD_CONST:
+ case AMDGPU::R600_LOAD_CONST:
{
int64_t RegIndex = MI->getOperand(1).getImm();
- unsigned ConstantReg = AMDIL::R600_CReg32RegClass.getRegister(RegIndex);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::COPY))
+ unsigned ConstantReg = AMDGPU::R600_CReg32RegClass.getRegister(RegIndex);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::COPY))
.addOperand(MI->getOperand(0))
.addReg(ConstantReg);
break;
}
- case AMDIL::LOAD_INPUT:
+ case AMDGPU::LOAD_INPUT:
{
int64_t RegIndex = MI->getOperand(1).getImm();
addLiveIn(MI, MF, MRI, TII,
- AMDIL::R600_TReg32RegClass.getRegister(RegIndex));
+ AMDGPU::R600_TReg32RegClass.getRegister(RegIndex));
break;
}
- case AMDIL::MASK_WRITE:
+ case AMDGPU::MASK_WRITE:
{
unsigned maskedRegister = MI->getOperand(0).getReg();
assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
return BB;
}
- case AMDIL::RAT_WRITE_CACHELESS_eg:
+ case AMDGPU::RAT_WRITE_CACHELESS_eg:
{
// Convert to DWORD address
unsigned NewAddr = MRI.createVirtualRegister(
- AMDIL::R600_TReg32_XRegisterClass);
+ AMDGPU::R600_TReg32_XRegisterClass);
unsigned ShiftValue = MRI.createVirtualRegister(
- AMDIL::R600_TReg32RegisterClass);
+ AMDGPU::R600_TReg32RegisterClass);
// XXX In theory, we should be able to pass ShiftValue directly to
// the LSHR_eg instruction as an inline literal, but I tried doing it
// this way and it didn't produce the correct results.
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV), ShiftValue)
- .addReg(AMDIL::ALU_LITERAL_X)
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::MOV), ShiftValue)
+ .addReg(AMDGPU::ALU_LITERAL_X)
.addImm(2);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::LSHR_eg), NewAddr)
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::LSHR_eg), NewAddr)
.addOperand(MI->getOperand(1))
.addReg(ShiftValue);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
break;
}
- case AMDIL::STORE_OUTPUT:
+ case AMDGPU::STORE_OUTPUT:
{
int64_t OutputIndex = MI->getOperand(1).getImm();
- unsigned OutputReg = AMDIL::R600_TReg32RegClass.getRegister(OutputIndex);
+ unsigned OutputReg = AMDGPU::R600_TReg32RegClass.getRegister(OutputIndex);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::COPY), OutputReg)
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::COPY), OutputReg)
.addOperand(MI->getOperand(0));
if (!MRI.isLiveOut(OutputReg)) {
break;
}
- case AMDIL::RESERVE_REG:
+ case AMDGPU::RESERVE_REG:
{
R600MachineFunctionInfo * MFI = MF->getInfo<R600MachineFunctionInfo>();
int64_t ReservedIndex = MI->getOperand(0).getImm();
unsigned ReservedReg =
- AMDIL::R600_TReg32RegClass.getRegister(ReservedIndex);
+ AMDGPU::R600_TReg32RegClass.getRegister(ReservedIndex);
MFI->ReservedRegs.push_back(ReservedReg);
break;
}
- case AMDIL::TXD:
+ case AMDGPU::TXD:
{
- unsigned t0 = MRI.createVirtualRegister(AMDIL::R600_Reg128RegisterClass);
- unsigned t1 = MRI.createVirtualRegister(AMDIL::R600_Reg128RegisterClass);
+ unsigned t0 = MRI.createVirtualRegister(AMDGPU::R600_Reg128RegisterClass);
+ unsigned t1 = MRI.createVirtualRegister(AMDGPU::R600_Reg128RegisterClass);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_H), t0)
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), t0)
.addOperand(MI->getOperand(3))
.addOperand(MI->getOperand(4))
.addOperand(MI->getOperand(5));
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_V), t1)
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), t1)
.addOperand(MI->getOperand(2))
.addOperand(MI->getOperand(4))
.addOperand(MI->getOperand(5));
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SAMPLE_G))
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
.addOperand(MI->getOperand(4))
.addReg(t1, RegState::Implicit);
break;
}
- case AMDIL::TXD_SHADOW:
+ case AMDGPU::TXD_SHADOW:
{
- unsigned t0 = MRI.createVirtualRegister(AMDIL::R600_Reg128RegisterClass);
- unsigned t1 = MRI.createVirtualRegister(AMDIL::R600_Reg128RegisterClass);
+ unsigned t0 = MRI.createVirtualRegister(AMDGPU::R600_Reg128RegisterClass);
+ unsigned t1 = MRI.createVirtualRegister(AMDGPU::R600_Reg128RegisterClass);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_H), t0)
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), t0)
.addOperand(MI->getOperand(3))
.addOperand(MI->getOperand(4))
.addOperand(MI->getOperand(5));
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_V), t1)
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), t1)
.addOperand(MI->getOperand(2))
.addOperand(MI->getOperand(4))
.addOperand(MI->getOperand(5));
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SAMPLE_C_G))
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
.addOperand(MI->getOperand(4))
MachineRegisterInfo & MRI, unsigned dword_offset) const
{
MachineBasicBlock::iterator I = *MI;
- unsigned PtrReg = MRI.createVirtualRegister(&AMDIL::R600_TReg32_XRegClass);
- MRI.setRegClass(MI->getOperand(0).getReg(), &AMDIL::R600_TReg32_XRegClass);
+ unsigned PtrReg = MRI.createVirtualRegister(&AMDGPU::R600_TReg32_XRegClass);
+ MRI.setRegClass(MI->getOperand(0).getReg(), &AMDGPU::R600_TReg32_XRegClass);
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::MOV), PtrReg)
- .addReg(AMDIL::ALU_LITERAL_X)
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::MOV), PtrReg)
+ .addReg(AMDGPU::ALU_LITERAL_X)
.addImm(dword_offset * 4);
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::VTX_READ_PARAM_eg))
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::VTX_READ_PARAM_eg))
.addOperand(MI->getOperand(0))
.addReg(PtrReg)
.addImm(0);
bool KillSrc) const
{
- unsigned subRegMap[4] = {AMDIL::sel_x, AMDIL::sel_y, AMDIL::sel_z, AMDIL::sel_w};
+ unsigned subRegMap[4] = {AMDGPU::sel_x, AMDGPU::sel_y,
+ AMDGPU::sel_z, AMDGPU::sel_w};
- if (AMDIL::R600_Reg128RegClass.contains(DestReg)
- && AMDIL::R600_Reg128RegClass.contains(SrcReg)) {
+ if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
+ && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
for (unsigned i = 0; i < 4; i++) {
- BuildMI(MBB, MI, DL, get(AMDIL::MOV))
+ BuildMI(MBB, MI, DL, get(AMDGPU::MOV))
.addReg(RI.getSubReg(DestReg, subRegMap[i]), RegState::Define)
.addReg(RI.getSubReg(SrcReg, subRegMap[i]))
.addReg(DestReg, RegState::Define | RegState::Implicit);
} else {
/* We can't copy vec4 registers */
- assert(!AMDIL::R600_Reg128RegClass.contains(DestReg)
- && !AMDIL::R600_Reg128RegClass.contains(SrcReg));
+ assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
+ && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));
- BuildMI(MBB, MI, DL, get(AMDIL::MOV), DestReg)
+ BuildMI(MBB, MI, DL, get(AMDGPU::MOV), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
}
MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
unsigned DstReg, int64_t Imm) const
{
- MachineInstr * MI = MF->CreateMachineInstr(get(AMDIL::MOV), DebugLoc());
+ MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
MachineInstrBuilder(MI).addReg(DstReg, RegState::Define);
- MachineInstrBuilder(MI).addReg(AMDIL::ALU_LITERAL_X);
+ MachineInstrBuilder(MI).addReg(AMDGPU::ALU_LITERAL_X);
MachineInstrBuilder(MI).addImm(Imm);
return MI;
unsigned R600InstrInfo::getIEQOpcode() const
{
- return AMDIL::SETE_INT;
+ return AMDGPU::SETE_INT;
}
bool R600InstrInfo::isMov(unsigned Opcode) const
{
switch(Opcode) {
default: return false;
- case AMDIL::MOV:
- case AMDIL::MOV_IMM_F32:
- case AMDIL::MOV_IMM_I32:
+ case AMDGPU::MOV:
+ case AMDGPU::MOV_IMM_F32:
+ case AMDGPU::MOV_IMM_I32:
return true;
}
}
bit isVector = 0;
let Inst = inst;
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
let OutOperandList = outs;
let InOperandList = ins;
let AsmString = asm;
{
field bits<64> Inst;
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
}
def MEMxi : Operand<iPTR> {
BitVector Reserved(getNumRegs());
const R600MachineFunctionInfo * MFI = MF.getInfo<R600MachineFunctionInfo>();
- Reserved.set(AMDIL::ZERO);
- Reserved.set(AMDIL::HALF);
- Reserved.set(AMDIL::ONE);
- Reserved.set(AMDIL::ONE_INT);
- Reserved.set(AMDIL::NEG_HALF);
- Reserved.set(AMDIL::NEG_ONE);
- Reserved.set(AMDIL::PV_X);
- Reserved.set(AMDIL::ALU_LITERAL_X);
+ Reserved.set(AMDGPU::ZERO);
+ Reserved.set(AMDGPU::HALF);
+ Reserved.set(AMDGPU::ONE);
+ Reserved.set(AMDGPU::ONE_INT);
+ Reserved.set(AMDGPU::NEG_HALF);
+ Reserved.set(AMDGPU::NEG_ONE);
+ Reserved.set(AMDGPU::PV_X);
+ Reserved.set(AMDGPU::ALU_LITERAL_X);
- for (TargetRegisterClass::iterator I = AMDIL::R600_CReg32RegClass.begin(),
- E = AMDIL::R600_CReg32RegClass.end(); I != E; ++I) {
+ for (TargetRegisterClass::iterator I = AMDGPU::R600_CReg32RegClass.begin(),
+ E = AMDGPU::R600_CReg32RegClass.end(); I != E; ++I) {
Reserved.set(*I);
}
R600RegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
{
switch (rc->getID()) {
- case AMDIL::GPRF32RegClassID:
- case AMDIL::GPRI32RegClassID:
- return &AMDIL::R600_Reg32RegClass;
+ case AMDGPU::GPRF32RegClassID:
+ case AMDGPU::GPRI32RegClassID:
+ return &AMDGPU::R600_Reg32RegClass;
default: return rc;
}
}
unsigned R600RegisterInfo::getHWRegIndex(unsigned reg) const
{
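// The magic values below are presumably the R600 ALU special source slots
// (248 = 0, 249 = 1.0f, 250 = 1, 252 = 0.5f, 253 = inline literal); all other
// registers fall through to the generated lookup in getHWRegIndexGen().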
switch(reg) {
- case AMDIL::ZERO: return 248;
- case AMDIL::ONE:
- case AMDIL::NEG_ONE: return 249;
- case AMDIL::ONE_INT: return 250;
- case AMDIL::HALF:
- case AMDIL::NEG_HALF: return 252;
- case AMDIL::ALU_LITERAL_X: return 253;
+ case AMDGPU::ZERO: return 248;
+ case AMDGPU::ONE:
+ case AMDGPU::NEG_ONE: return 249;
+ case AMDGPU::ONE_INT: return 250;
+ case AMDGPU::HALF:
+ case AMDGPU::NEG_HALF: return 252;
+ case AMDGPU::ALU_LITERAL_X: return 253;
default: return getHWRegIndexGen(reg);
}
}
unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const
{
switch(reg) {
- case AMDIL::ZERO:
- case AMDIL::ONE:
- case AMDIL::ONE_INT:
- case AMDIL::NEG_ONE:
- case AMDIL::HALF:
- case AMDIL::NEG_HALF:
- case AMDIL::ALU_LITERAL_X:
+ case AMDGPU::ZERO:
+ case AMDGPU::ONE:
+ case AMDGPU::ONE_INT:
+ case AMDGPU::NEG_ONE:
+ case AMDGPU::HALF:
+ case AMDGPU::NEG_HALF:
+ case AMDGPU::ALU_LITERAL_X:
return 0;
default: return getHWRegChanGen(reg);
}
{
switch(VT.SimpleTy) {
default:
- case MVT::i32: return AMDIL::R600_TReg32RegisterClass;
+ case MVT::i32: return AMDGPU::R600_TReg32RegisterClass;
}
}
#include "R600HwRegInfo.include"
{
struct interp_info InterpUse[INTERP_VALUES] = {
- {false, {AMDIL::PERSP_SAMPLE_I, AMDIL::PERSP_SAMPLE_J}, 2},
- {false, {AMDIL::PERSP_CENTER_I, AMDIL::PERSP_CENTER_J}, 2},
- {false, {AMDIL::PERSP_CENTROID_I, AMDIL::PERSP_CENTROID_J}, 2},
- {false, {AMDIL::PERSP_I_W, AMDIL::PERSP_J_W, AMDIL::PERSP_1_W}, 3},
- {false, {AMDIL::LINEAR_SAMPLE_I, AMDIL::LINEAR_SAMPLE_J}, 2},
- {false, {AMDIL::LINEAR_CENTER_I, AMDIL::LINEAR_CENTER_J}, 2},
- {false, {AMDIL::LINEAR_CENTROID_I, AMDIL::LINEAR_CENTROID_J}, 2},
- {false, {AMDIL::LINE_STIPPLE_TEX_COORD}, 1},
- {false, {AMDIL::POS_X_FLOAT}, 1},
- {false, {AMDIL::POS_Y_FLOAT}, 1},
- {false, {AMDIL::POS_Z_FLOAT}, 1},
- {false, {AMDIL::POS_W_FLOAT}, 1},
- {false, {AMDIL::FRONT_FACE}, 1},
- {false, {AMDIL::ANCILLARY}, 1},
- {false, {AMDIL::SAMPLE_COVERAGE}, 1},
- {false, {AMDIL::POS_FIXED_PT}, 1}
+ {false, {AMDGPU::PERSP_SAMPLE_I, AMDGPU::PERSP_SAMPLE_J}, 2},
+ {false, {AMDGPU::PERSP_CENTER_I, AMDGPU::PERSP_CENTER_J}, 2},
+ {false, {AMDGPU::PERSP_CENTROID_I, AMDGPU::PERSP_CENTROID_J}, 2},
+ {false, {AMDGPU::PERSP_I_W, AMDGPU::PERSP_J_W, AMDGPU::PERSP_1_W}, 3},
+ {false, {AMDGPU::LINEAR_SAMPLE_I, AMDGPU::LINEAR_SAMPLE_J}, 2},
+ {false, {AMDGPU::LINEAR_CENTER_I, AMDGPU::LINEAR_CENTER_J}, 2},
+ {false, {AMDGPU::LINEAR_CENTROID_I, AMDGPU::LINEAR_CENTROID_J}, 2},
+ {false, {AMDGPU::LINE_STIPPLE_TEX_COORD}, 1},
+ {false, {AMDGPU::POS_X_FLOAT}, 1},
+ {false, {AMDGPU::POS_Y_FLOAT}, 1},
+ {false, {AMDGPU::POS_Z_FLOAT}, 1},
+ {false, {AMDGPU::POS_W_FLOAT}, 1},
+ {false, {AMDGPU::FRONT_FACE}, 1},
+ {false, {AMDGPU::ANCILLARY}, 1},
+ {false, {AMDGPU::SAMPLE_COVERAGE}, 1},
+ {false, {AMDGPU::POS_FIXED_PT}, 1}
};
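// Each InterpUse entry describes one interpolated/system input: whether it is
// used (presumably the boolean flag), which pseudo input registers carry it,
// and how many VGPRs it occupies; used entries are remapped onto consecutive
// physical VGPRs in the loop below.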
SIMachineFunctionInfo * MFI = MF.getInfo<SIMachineFunctionInfo>();
for (unsigned reg_idx = 0; reg_idx < InterpUse[interp_idx].reg_count;
reg_idx++, used_vgprs++) {
- unsigned new_reg = AMDIL::VReg_32RegisterClass->getRegister(used_vgprs);
- unsigned virt_reg = MRI.createVirtualRegister(AMDIL::VReg_32RegisterClass);
+ unsigned new_reg = AMDGPU::VReg_32RegisterClass->getRegister(used_vgprs);
+ unsigned virt_reg = MRI.createVirtualRegister(AMDGPU::VReg_32RegisterClass);
MRI.replaceRegWith(InterpUse[interp_idx].regs[reg_idx], virt_reg);
AMDGPU::utilAddLiveIn(&MF, MRI, TM.getInstrInfo(), new_reg, virt_reg);
}
continue;
}
reg = MO.getReg();
- if (reg == AMDIL::VCC) {
+ if (reg == AMDGPU::VCC) {
VCCUsed = true;
continue;
}
- if (AMDIL::SReg_32RegClass.contains(reg)) {
+ if (AMDGPU::SReg_32RegClass.contains(reg)) {
isSGPR = true;
width = 1;
- } else if (AMDIL::VReg_32RegClass.contains(reg)) {
+ } else if (AMDGPU::VReg_32RegClass.contains(reg)) {
isSGPR = false;
width = 1;
- } else if (AMDIL::SReg_64RegClass.contains(reg)) {
+ } else if (AMDGPU::SReg_64RegClass.contains(reg)) {
isSGPR = true;
width = 2;
- } else if (AMDIL::VReg_64RegClass.contains(reg)) {
+ } else if (AMDGPU::VReg_64RegClass.contains(reg)) {
isSGPR = false;
width = 2;
- } else if (AMDIL::SReg_128RegClass.contains(reg)) {
+ } else if (AMDGPU::SReg_128RegClass.contains(reg)) {
isSGPR = true;
width = 4;
- } else if (AMDIL::VReg_128RegClass.contains(reg)) {
+ } else if (AMDGPU::VReg_128RegClass.contains(reg)) {
isSGPR = false;
width = 4;
- } else if (AMDIL::SReg_256RegClass.contains(reg)) {
+ } else if (AMDGPU::SReg_256RegClass.contains(reg)) {
isSGPR = true;
width = 8;
} else {
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
MachineInstr &MI = *I;
- if (MI.getOpcode() != AMDIL::KILL && MI.getOpcode() != AMDIL::RETURN) {
+ if (MI.getOpcode() != AMDGPU::KILL && MI.getOpcode() != AMDGPU::RETURN) {
emitInstr(MI);
}
}
}
// Emit S_END_PGM
MachineInstr * End = BuildMI(MF, DebugLoc(),
- TM->getInstrInfo()->get(AMDIL::S_ENDPGM));
+ TM->getInstrInfo()->get(AMDGPU::S_ENDPGM));
emitInstr(*End);
return false;
}
continue;
}
unsigned reg = MI.getOperand(opIdx).getReg();
- if (AMDIL::VReg_32RegClass.contains(reg)
- || AMDIL::VReg_64RegClass.contains(reg)) {
+ if (AMDGPU::VReg_32RegClass.contains(reg)
+ || AMDGPU::VReg_64RegClass.contains(reg)) {
Value |= (VGPR_BIT(opIdx)) << vgprBitOffset;
}
}
print <<STRING;
-let Namespace = "AMDIL" in {
+let Namespace = "AMDGPU" in {
def low : SubRegIndex;
def high : SubRegIndex;
}
class SIReg <string n> : Register<n> {
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
}
class SI_64 <string n, list<Register> subregs> : RegisterWithSubRegs<n, subregs> {
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
let SubRegIndices = [low, high];
}
class SI_128 <string n, list<Register> subregs> : RegisterWithSubRegs<n, subregs> {
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
let SubRegIndices = [sel_x, sel_y, sel_z, sel_w];
}
class SI_256 <string n, list<Register> subregs> : RegisterWithSubRegs<n, subregs> {
- let Namespace = "AMDIL";
+ let Namespace = "AMDGPU";
let SubRegIndices = [sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7];
}
print <<STRING;
-def SReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+def SReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
(add (sequence "SGPR%u", 0, $SGPR_MAX_IDX), SREG_LIT_0, M0)
>;
-def VReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+def VReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
(add (sequence "VGPR%u", 0, $VGPR_MAX_IDX),
PERSP_SAMPLE_I, PERSP_SAMPLE_J,
PERSP_CENTER_I, PERSP_CENTER_J,
)
>;
-def AllReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+def AllReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
(add VReg_32, SReg_32)
>;
-def CCReg : RegisterClass<"AMDIL", [f32], 32, (add VCC, SCC)>;
+def CCReg : RegisterClass<"AMDGPU", [f32], 32, (add VCC, SCC)>;
STRING
my $vgpr64_list = join(',', @VGPR64);
print <<STRING;
-def AllReg_64 : RegisterClass<"AMDIL", [f64, i64], 64,
+def AllReg_64 : RegisterClass<"AMDGPU", [f64, i64], 64,
(add $sgpr64_list, $vgpr64_list)
>;
for my $key (keys(%hw_values)) {
my @names = @{$hw_values{$key}};
for my $regname (@names) {
- print $fh " case AMDIL::$regname:\n"
+ print $fh " case AMDGPU::$regname:\n"
}
print $fh " return $key;\n";
}
}
my $reg_list = join(', ', @registers);
- print "def $class_prefix\_$reg_width : RegisterClass<\"AMDIL\", [" . join (', ', @types) . "], $reg_width,\n (add $reg_list)\n>{\n";
+ print "def $class_prefix\_$reg_width : RegisterClass<\"AMDGPU\", [" . join (', ', @types) . "], $reg_width,\n (add $reg_list)\n>{\n";
print " let SubRegClasses = [($class_prefix\_", ($reg_width / $component_count) , ' ', join(', ', @{$sub_reg_ref}), ")];\n}\n";
return @registers;
}
AMDGPUTargetLowering(TM),
TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo()))
{
- addRegisterClass(MVT::v4f32, &AMDIL::VReg_128RegClass);
- addRegisterClass(MVT::f32, &AMDIL::VReg_32RegClass);
- addRegisterClass(MVT::i32, &AMDIL::VReg_32RegClass);
- addRegisterClass(MVT::i64, &AMDIL::VReg_64RegClass);
+ addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
+ addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
+ addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
+ addRegisterClass(MVT::i64, &AMDGPU::VReg_64RegClass);
- addRegisterClass(MVT::v4i32, &AMDIL::SReg_128RegClass);
- addRegisterClass(MVT::v8i32, &AMDIL::SReg_256RegClass);
+ addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
+ addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
computeRegisterProperties();
default:
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
- case AMDIL::CLAMP_SI:
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::V_MOV_B32_e64))
+ case AMDGPU::CLAMP_SI:
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
// VSRC1-2 are unused, but we still need to fill all the
MI->eraseFromParent();
break;
- case AMDIL::FABS_SI:
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::V_MOV_B32_e64))
+ case AMDGPU::FABS_SI:
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
// VSRC1-2 are unused, but we still need to fill all the
MI->eraseFromParent();
break;
- case AMDIL::SI_INTERP:
+ case AMDGPU::SI_INTERP:
LowerSI_INTERP(MI, *BB, I, MRI);
break;
- case AMDIL::SI_INTERP_CONST:
+ case AMDGPU::SI_INTERP_CONST:
LowerSI_INTERP_CONST(MI, *BB, I);
break;
- case AMDIL::SI_V_CNDLT:
+ case AMDGPU::SI_V_CNDLT:
LowerSI_V_CNDLT(MI, *BB, I, MRI);
break;
- case AMDIL::USE_SGPR_32:
- case AMDIL::USE_SGPR_64:
+ case AMDGPU::USE_SGPR_32:
+ case AMDGPU::USE_SGPR_64:
lowerUSE_SGPR(MI, BB->getParent(), MRI);
MI->eraseFromParent();
break;
- case AMDIL::VS_LOAD_BUFFER_INDEX:
- addLiveIn(MI, BB->getParent(), MRI, TII, AMDIL::VGPR0);
+ case AMDGPU::VS_LOAD_BUFFER_INDEX:
+ addLiveIn(MI, BB->getParent(), MRI, TII, AMDGPU::VGPR0);
MI->eraseFromParent();
break;
}
void SITargetLowering::AppendS_WAITCNT(MachineInstr *MI, MachineBasicBlock &BB,
MachineBasicBlock::iterator I) const
{
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_WAITCNT))
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WAITCNT))
.addImm(0);
}
void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
{
- unsigned tmp = MRI.createVirtualRegister(&AMDIL::VReg_32RegClass);
+ unsigned tmp = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
MachineOperand dst = MI->getOperand(0);
MachineOperand iReg = MI->getOperand(1);
MachineOperand jReg = MI->getOperand(2);
MachineOperand attr = MI->getOperand(4);
MachineOperand params = MI->getOperand(5);
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_MOV_B32))
- .addReg(AMDIL::M0)
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32))
+ .addReg(AMDGPU::M0)
.addOperand(params);
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_P1_F32), tmp)
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P1_F32), tmp)
.addOperand(iReg)
.addOperand(attr_chan)
.addOperand(attr);
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_P2_F32))
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P2_F32))
.addOperand(dst)
.addReg(tmp)
.addOperand(jReg)
MachineOperand attr = MI->getOperand(2);
MachineOperand params = MI->getOperand(3);
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_MOV_B32))
- .addReg(AMDIL::M0)
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32))
+ .addReg(AMDGPU::M0)
.addOperand(params);
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_MOV_F32))
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_MOV_F32))
.addOperand(dst)
.addOperand(attr_chan)
.addOperand(attr);
void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
{
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_CMP_LT_F32_e32))
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CMP_LT_F32_e32))
.addOperand(MI->getOperand(1))
- .addReg(AMDIL::SREG_LIT_0);
+ .addReg(AMDGPU::SREG_LIT_0);
- BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_CNDMASK_B32))
+ BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CNDMASK_B32))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(2))
.addOperand(MI->getOperand(3));
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const
{
- BuildMI(MBB, MI, DL, get(AMDIL::V_MOV_B32_e32), DestReg)
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
}
/* This instruction always has a literal */
- if (MI.getOpcode() == AMDIL::S_MOV_IMM_I32) {
+ if (MI.getOpcode() == AMDGPU::S_MOV_IMM_I32) {
return 8;
}
MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
int64_t Imm) const
{
- MachineInstr * MI = MF->CreateMachineInstr(get(AMDIL::V_MOV_IMM_I32), DebugLoc());
+ MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_IMM_I32), DebugLoc());
MachineInstrBuilder(MI).addReg(DstReg, RegState::Define);
MachineInstrBuilder(MI).addImm(Imm);
{
switch(Opcode) {
default: return false;
- case AMDIL::S_MOV_B32:
- case AMDIL::S_MOV_B64:
- case AMDIL::V_MOV_B32_e32:
- case AMDIL::V_MOV_B32_e64:
- case AMDIL::V_MOV_IMM_F32:
- case AMDIL::V_MOV_IMM_I32:
- case AMDIL::S_MOV_IMM_I32:
+ case AMDGPU::S_MOV_B32:
+ case AMDGPU::S_MOV_B64:
+ case AMDGPU::V_MOV_B32_e32:
+ case AMDGPU::V_MOV_B32_e64:
+ case AMDGPU::V_MOV_IMM_F32:
+ case AMDGPU::V_MOV_IMM_I32:
+ case AMDGPU::S_MOV_IMM_I32:
return true;
}
}
unsigned SIRegisterInfo::getBinaryCode(unsigned reg) const
{
switch (reg) {
- case AMDIL::M0: return 124;
- case AMDIL::SREG_LIT_0: return 128;
+ case AMDGPU::M0: return 124;
+ case AMDGPU::SREG_LIT_0: return 128;
default: return getHWRegNum(reg);
}
}
SIRegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
{
switch (rc->getID()) {
- case AMDIL::GPRF32RegClassID:
- return &AMDIL::VReg_32RegClass;
+ case AMDGPU::GPRF32RegClassID:
+ return &AMDGPU::VReg_32RegClass;
default: return rc;
}
}
{
switch(VT.SimpleTy) {
default:
- case MVT::i32: return AMDIL::VReg_32RegisterClass;
+ case MVT::i32: return AMDGPU::VReg_32RegisterClass;
}
}
#include "SIRegisterGetHWRegNum.inc"