//===- AMDILRegisterInfo.cpp - AMDIL Register Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
// This file contains the AMDIL implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "AMDILRegisterInfo.h"
#include "AMDILUtilityFunctions.h"

#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
30 AMDILRegisterInfo::AMDILRegisterInfo(AMDILTargetMachine
&tm
,
31 const TargetInstrInfo
&tii
)
32 : AMDILGenRegisterInfo(0), // RA???
40 AMDILRegisterInfo::getCalleeSavedRegs(const MachineFunction
*MF
) const
42 static const uint16_t CalleeSavedRegs
[] = { 0 };
43 // TODO: Does IL need to actually have any callee saved regs?
44 // I don't think we do since we can just use sequential registers
45 // Maybe this would be easier if every function call was inlined first
46 // and then there would be no callee issues to deal with
47 //TODO(getCalleeSavedRegs);
48 return CalleeSavedRegs
;
52 AMDILRegisterInfo::getReservedRegs(const MachineFunction
&MF
) const
54 BitVector
Reserved(getNumRegs());
55 // We reserve the first getNumRegs() registers as they are the ones passed
56 // in live-in/live-out
57 // and therefor cannot be killed by the scheduler. This works around a bug
59 // that was causing the linearscan register allocator to kill registers
61 // function that were also passed as LiveIn registers.
62 for (unsigned int x
= 0, y
= 256; x
< y
; ++x
) {
69 AMDILRegisterInfo::getAllocatableSet(const MachineFunction
&MF
,
70 const TargetRegisterClass
*RC
= NULL
) const
72 BitVector
Allocatable(getNumRegs());
77 const TargetRegisterClass
* const*
78 AMDILRegisterInfo::getCalleeSavedRegClasses(const MachineFunction
*MF
) const
80 static const TargetRegisterClass
* const CalleeSavedRegClasses
[] = { 0 };
81 // TODO: Keep in sync with getCalleeSavedRegs
82 //TODO(getCalleeSavedRegClasses);
83 return CalleeSavedRegClasses
;
86 AMDILRegisterInfo::eliminateCallFramePseudoInstr(
88 MachineBasicBlock
&MBB
,
89 MachineBasicBlock::iterator I
) const
94 // For each frame index we find, we store the offset in the stack which is
95 // being pushed back into the global buffer. The offset into the stack where
96 // the value is stored is copied into a new register and the frame index is
97 // then replaced with that register.
99 AMDILRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II
,
101 RegScavenger
*RS
) const
103 assert(SPAdj
== 0 && "Unexpected");
104 MachineInstr
&MI
= *II
;
105 MachineFunction
&MF
= *MI
.getParent()->getParent();
106 MachineFrameInfo
*MFI
= MF
.getFrameInfo();
107 unsigned int y
= MI
.getNumOperands();
108 for (unsigned int x
= 0; x
< y
; ++x
) {
109 if (!MI
.getOperand(x
).isFI()) {
112 bool def
= isStoreInst(TM
.getInstrInfo(), &MI
);
113 int FrameIndex
= MI
.getOperand(x
).getIndex();
114 int64_t Offset
= MFI
->getObjectOffset(FrameIndex
);
115 //int64_t Size = MF.getFrameInfo()->getObjectSize(FrameIndex);
116 // An optimization is to only use the offsets if the size
117 // is larger than 4, which means we are storing an array
118 // instead of just a pointer. If we are size 4 then we can
119 // just do register copies since we don't need to worry about
120 // indexing dynamically
121 MachineInstr
*nMI
= MF
.CreateMachineInstr(
122 TII
.get(AMDIL::LOADCONST_i32
), MI
.getDebugLoc());
123 nMI
->addOperand(MachineOperand::CreateReg(AMDIL::DFP
, true));
125 MachineOperand::CreateImm(Offset
));
126 MI
.getParent()->insert(II
, nMI
);
127 nMI
= MF
.CreateMachineInstr(
128 TII
.get(AMDIL::ADD_i32
), MI
.getDebugLoc());
129 nMI
->addOperand(MachineOperand::CreateReg(AMDIL::DFP
, true));
130 nMI
->addOperand(MachineOperand::CreateReg(AMDIL::DFP
, false));
131 nMI
->addOperand(MachineOperand::CreateReg(AMDIL::FP
, false));
133 MI
.getParent()->insert(II
, nMI
);
134 if (MI
.getOperand(x
).isReg() == false) {
135 MI
.getOperand(x
).ChangeToRegister(
136 nMI
->getOperand(0).getReg(), def
);
138 MI
.getOperand(x
).setReg(
139 nMI
->getOperand(0).getReg());
145 AMDILRegisterInfo::processFunctionBeforeFrameFinalized(
146 MachineFunction
&MF
) const
148 //TODO(processFunctionBeforeFrameFinalized);
149 // Here we keep track of the amount of stack that the current function
151 // that we can set the offset to the end of the stack and any other
153 // will not overwrite any stack variables.
154 // baseOffset = nextFuncOffset;
155 MachineFrameInfo
*MFI
= MF
.getFrameInfo();
157 for (uint32_t x
= 0, y
= MFI
->getNumObjects(); x
< y
; ++x
) {
158 int64_t size
= MFI
->getObjectSize(x
);
159 if (!(size
% 4) && size
> 1) {
160 nextFuncOffset
+= size
;
162 nextFuncOffset
+= 16;
167 AMDILRegisterInfo::getRARegister() const
173 AMDILRegisterInfo::getFrameRegister(const MachineFunction
&MF
) const
179 AMDILRegisterInfo::getEHExceptionRegister() const
181 assert(0 && "What is the exception register");
186 AMDILRegisterInfo::getEHHandlerRegister() const
188 assert(0 && "What is the exception handler register");
193 AMDILRegisterInfo::getStackSize() const
195 return nextFuncOffset
- baseOffset
;
198 #define GET_REGINFO_TARGET_DESC
199 #include "AMDILGenRegisterInfo.inc"