cmdO = MemCmd::StoreCondReq;
}
- auto req = new Request(ev->getAddr(), ev->getSize(), flags, 0);
+ auto req = std::make_shared<Request>(ev->getAddr(), ev->getSize(), flags, 0);
req->setContext(ev->getGroupId());
auto pkt = new Packet(req, cmdO);
// copy the payload and then destroy gem5 packet
resp->setPayload(pkt->getSize(), pkt->getPtr<uint8_t>());
- delete pkt->req;
delete pkt;
nic->send(resp);
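
For orientation: every hunk in this change assumes RequestPtr has switched from a plain `Request *` alias to a shared pointer. A minimal sketch of the new alias (the real definition lives in src/mem/request.hh):

    #include <memory>

    class Request;
    // A Request now stays alive for as long as any Packet, port, or
    // in-flight translation holds a reference, which is why the explicit
    // 'delete pkt->req' calls disappear throughout this change.
    typedef std::shared_ptr<Request> RequestPtr;
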
if (simPhase == INIT) {
link->sendInitData(ev);
- delete pkt->req;
delete pkt;
} else {
if (pkt->needsResponse()) {
// Make a Req/Pkt pair for the snoop; no response is needed
// presently no consideration for masterId, packet type, flags...
- RequestPtr req = new Request(event->getAddr(), event->getSize(), 0, 0);
+ RequestPtr req = std::make_shared<Request>(
+ event->getAddr(), event->getSize(), 0, 0);
+
auto pkt = new Packet(req, ::MemCmd::InvalidateReq);
// Clear out bus delay notifications
template <class XC>
inline void
-handleLockedRead(XC *xc, RequestPtr req)
+handleLockedRead(XC *xc, const RequestPtr &req)
{
xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr() & ~0xf);
xc->setMiscReg(MISCREG_LOCKFLAG, true);
template <class XC>
inline bool
-handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
{
if (req->isUncacheable()) {
// Funky Turbolaser mailbox access...don't update
}
Fault
-TLB::checkCacheability(RequestPtr &req, bool itb)
+TLB::checkCacheability(const RequestPtr &req, bool itb)
{
// in Alpha, cacheability is controlled by upper-level bits of the
// physical address
}
Fault
-TLB::translateInst(RequestPtr req, ThreadContext *tc)
+TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
{
//If this is a pal pc, then set PHYSICAL
if (FullSystem && PcPAL(req->getPC()))
}
Fault
-TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
+TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
{
mode_type mode =
(mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));
}
Fault
-TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
+TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
if (mode == Execute)
return translateInst(req, tc);
}
void
-TLB::translateTiming(RequestPtr req, ThreadContext *tc,
+TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode)
{
assert(translation);
}
Fault
-TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+TLB::finalizePhysical(const RequestPtr &req, ThreadContext *tc,
+ Mode mode) const
{
return NoFault;
}
return unimplBits == 0 || unimplBits == VAddrUnImplMask;
}
- static Fault checkCacheability(RequestPtr &req, bool itb = false);
+ static Fault checkCacheability(const RequestPtr &req, bool itb = false);
// Checkpointing
void serialize(CheckpointOut &cp) const override;
}
protected:
- Fault translateData(RequestPtr req, ThreadContext *tc, bool write);
- Fault translateInst(RequestPtr req, ThreadContext *tc);
+ Fault translateData(const RequestPtr &req, ThreadContext *tc, bool write);
+ Fault translateInst(const RequestPtr &req, ThreadContext *tc);
public:
Fault translateAtomic(
- RequestPtr req, ThreadContext *tc, Mode mode) override;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) override;
void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override;
Fault finalizePhysical(
- RequestPtr req, ThreadContext *tc, Mode mode) const override;
+ const RequestPtr &req, ThreadContext *tc,
+ Mode mode) const override;
};
} // namespace AlphaISA
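
The caller-side pattern for all of these ISA TLB interfaces is the same: build the request once with make_shared, pass it by const reference, and let the last holder release it. A hedged sketch (the argument names are illustrative, not from the patch):

    RequestPtr req = std::make_shared<Request>(
        asid, vaddr, size, flags, master_id, pc, context_id);
    Fault fault = tlb->translateAtomic(req, tc, BaseTLB::Read);
    if (fault == NoFault) {
        Addr paddr = req->getPaddr();
        // ... use paddr; no delete when req goes out of scope
    }
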
// can't be an atomic translation because that causes problems
// with unexpected atomic snoop requests.
warn("Translating via MISCREG(%d) in functional mode! Fix Me!\n", misc_reg);
- Request req(0, val, 0, flags, Request::funcMasterId,
- tc->pcState().pc(), tc->contextId());
+
+ auto req = std::make_shared<Request>(
+ 0, val, 0, flags, Request::funcMasterId,
+ tc->pcState().pc(), tc->contextId());
+
fault = getDTBPtr(tc)->translateFunctional(
- &req, tc, mode, tranType);
+ req, tc, mode, tranType);
+
TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
HCR hcr = readMiscRegNoEffect(MISCREG_HCR);
MiscReg newVal;
if (fault == NoFault) {
- Addr paddr = req.getPaddr();
+ Addr paddr = req->getPaddr();
if (haveLPAE && (ttbcr.eae || tranType & TLB::HypMode ||
((tranType & TLB::S1S2NsTran) && hcr.vm) )) {
newVal = (paddr & mask(39, 12)) |
case MISCREG_AT_S1E3R_Xt:
case MISCREG_AT_S1E3W_Xt:
{
- RequestPtr req = new Request;
+ RequestPtr req = std::make_shared<Request>();
Request::Flags flags = 0;
BaseTLB::Mode mode = BaseTLB::Read;
TLB::ArmTranslationType tranType = TLB::NormalTran;
"MISCREG: Translated addr %#x fault fsr %#x: PAR: %#x\n",
val, fsr, newVal);
}
- delete req;
setMiscRegNoEffect(MISCREG_PAR_EL1, newVal);
return;
}
template <class XC>
inline void
-handleLockedRead(XC *xc, RequestPtr req)
+handleLockedRead(XC *xc, const RequestPtr &req)
{
xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr());
xc->setMiscReg(MISCREG_LOCKFLAG, true);
template <class XC>
inline bool
-handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
{
if (req->isSwap())
return true;
Stage2LookUp::getTe(ThreadContext *tc, TlbEntry *destTe)
{
- fault = stage2Tlb->getTE(&stage2Te, &req, tc, mode, this, timing,
+ fault = stage2Tlb->getTE(&stage2Te, req, tc, mode, this, timing,
functional, false, tranType);
// Call finish if we're done already
if ((fault != NoFault) || (stage2Te != NULL)) {
// entry is now in the TLB this should always hit the cache.
if (fault == NoFault) {
if (ELIs64(tc, EL2))
- fault = stage2Tlb->checkPermissions64(stage2Te, &req, mode, tc);
+ fault = stage2Tlb->checkPermissions64(stage2Te, req, mode, tc);
else
- fault = stage2Tlb->checkPermissions(stage2Te, &req, mode);
+ fault = stage2Tlb->checkPermissions(stage2Te, req, mode);
}
- mergeTe(&req, mode);
+ mergeTe(req, mode);
*destTe = stage1Te;
}
return fault;
}
void
-Stage2LookUp::mergeTe(RequestPtr req, BaseTLB::Mode mode)
+Stage2LookUp::mergeTe(const RequestPtr &req, BaseTLB::Mode mode)
{
// Check again that we haven't got a fault
if (fault == NoFault) {
}
void
-Stage2LookUp::finish(const Fault &_fault, RequestPtr req,
+Stage2LookUp::finish(const Fault &_fault, const RequestPtr &req,
ThreadContext *tc, BaseTLB::Mode mode)
{
fault = _fault;
bool functional;
TLB::ArmTranslationType tranType;
TlbEntry *stage2Te;
- Request req;
+ RequestPtr req;
Fault fault;
bool complete;
bool selfDelete;
public:
- Stage2LookUp(TLB *s1Tlb, TLB *s2Tlb, TlbEntry s1Te, RequestPtr _req,
+ Stage2LookUp(TLB *s1Tlb, TLB *s2Tlb, TlbEntry s1Te, const RequestPtr &_req,
TLB::Translation *_transState, BaseTLB::Mode _mode, bool _timing,
bool _functional, TLB::ArmTranslationType _tranType) :
stage1Tlb(s1Tlb), stage2Tlb(s2Tlb), stage1Te(s1Te), s1Req(_req),
functional(_functional), tranType(_tranType), stage2Te(nullptr),
fault(NoFault), complete(false), selfDelete(false)
{
- req.setVirt(0, s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
- s1Req->getFlags(), s1Req->masterId(), 0);
+ req = std::make_shared<Request>();
+ req->setVirt(0, s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
+ s1Req->getFlags(), s1Req->masterId(), 0);
}
Fault getTe(ThreadContext *tc, TlbEntry *destTe);
- void mergeTe(RequestPtr req, BaseTLB::Mode mode);
+ void mergeTe(const RequestPtr &req, BaseTLB::Mode mode);
void setSelfDelete() { selfDelete = true; }
void markDelayed() {}
- void finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
+ void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode);
};
Fault fault;
// translate to physical address using the second stage MMU
- Request req = Request();
- req.setVirt(0, descAddr, numBytes, flags | Request::PT_WALK, masterId, 0);
+ auto req = std::make_shared<Request>();
+ req->setVirt(0, descAddr, numBytes, flags | Request::PT_WALK, masterId, 0);
if (isFunctional) {
- fault = stage2Tlb()->translateFunctional(&req, tc, BaseTLB::Read);
+ fault = stage2Tlb()->translateFunctional(req, tc, BaseTLB::Read);
} else {
- fault = stage2Tlb()->translateAtomic(&req, tc, BaseTLB::Read);
+ fault = stage2Tlb()->translateAtomic(req, tc, BaseTLB::Read);
}
// Now do the access.
- if (fault == NoFault && !req.getFlags().isSet(Request::NO_ACCESS)) {
- Packet pkt = Packet(&req, MemCmd::ReadReq);
+ if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
+ Packet pkt = Packet(req, MemCmd::ReadReq);
pkt.dataStatic(data);
if (isFunctional) {
port.sendFunctional(&pkt);
}
void
-Stage2MMU::Stage2Translation::finish(const Fault &_fault, RequestPtr req,
+Stage2MMU::Stage2Translation::finish(const Fault &_fault,
+ const RequestPtr &req,
ThreadContext *tc, BaseTLB::Mode mode)
{
fault = _fault;
class Stage2Translation : public BaseTLB::Translation
{
private:
- uint8_t *data;
- int numBytes;
- Request req;
- Event *event;
- Stage2MMU &parent;
- Addr oVAddr;
+ uint8_t *data;
+ int numBytes;
+ RequestPtr req;
+ Event *event;
+ Stage2MMU &parent;
+ Addr oVAddr;
public:
Fault fault;
markDelayed() {}
void
- finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
+ finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode);
void setVirt(Addr vaddr, int size, Request::Flags flags, int masterId)
{
numBytes = size;
- req.setVirt(0, vaddr, size, flags, masterId, 0);
+ req->setVirt(0, vaddr, size, flags, masterId, 0);
}
void translateTiming(ThreadContext *tc)
{
- parent.stage2Tlb()->translateTiming(&req, tc, this, BaseTLB::Read);
+ parent.stage2Tlb()->translateTiming(req, tc, this, BaseTLB::Read);
}
};
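
Note that setVirt() above now dereferences the `req` member, which after this change is a RequestPtr rather than an embedded Request. That only works if the Stage2Translation constructor, which is not part of this excerpt, allocates it first, presumably along these lines:

    // In Stage2Translation's constructor (assumption; the hunk adding
    // this is not shown here):
    req = std::make_shared<Request>();
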
}
Fault
-TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
+TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
TLB::Translation *_trans, bool _timing, bool _functional,
bool secure, TLB::ArmTranslationType tranType,
currState->tc->getCpuPtr()->clockPeriod(), flags);
(this->*doDescriptor)();
} else {
- RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(
+ descAddr, numBytes, flags, masterId);
+
req->taskId(ContextSwitchTaskId::DMA);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
pkt->dataStatic(data);
port->sendFunctional(pkt);
(this->*doDescriptor)();
- delete req;
delete pkt;
}
}
void regStats() override;
- Fault walk(RequestPtr req, ThreadContext *tc, uint16_t asid, uint8_t _vmid,
+ Fault walk(const RequestPtr &req, ThreadContext *tc,
+ uint16_t asid, uint8_t _vmid,
bool _isHyp, TLB::Mode mode, TLB::Translation *_trans,
bool timing, bool functional, bool secure,
TLB::ArmTranslationType tranType, bool _stage2Req);
}
Fault
-TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+TLB::finalizePhysical(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const
{
const Addr paddr = req->getPaddr();
}
Fault
-TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
+TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
Translation *translation, bool &delay, bool timing)
{
updateMiscReg(tc);
}
Fault
-TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
+TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
{
// a data cache maintenance instruction that operates by MVA does
// not generate a Data Abort exception due to a Permission fault
Fault
-TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
+TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
ThreadContext *tc)
{
assert(aarch64);
}
Fault
-TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
+TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
Translation *translation, bool &delay, bool timing,
TLB::ArmTranslationType tranType, bool functional)
{
}
Fault
-TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
+TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
TLB::ArmTranslationType tranType)
{
updateMiscReg(tc, tranType);
}
Fault
-TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
+TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
TLB::ArmTranslationType tranType)
{
updateMiscReg(tc, tranType);
}
void
-TLB::translateTiming(RequestPtr req, ThreadContext *tc,
+TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
{
updateMiscReg(tc, tranType);
}
Fault
-TLB::translateComplete(RequestPtr req, ThreadContext *tc,
+TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
bool callFromS2)
{
}
Fault
-TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
+TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
Translation *translation, bool timing, bool functional,
bool is_secure, TLB::ArmTranslationType tranType)
{
}
Fault
-TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
+TLB::getResultTe(TlbEntry **te, const RequestPtr &req,
+ ThreadContext *tc, Mode mode,
Translation *translation, bool timing, bool functional,
TlbEntry *mergeTe)
{
}
Fault
-TLB::testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
+TLB::testTranslation(const RequestPtr &req, Mode mode,
+ TlbEntry::DomainType domain)
{
if (!test || !req->hasSize() || req->getSize() == 0 ||
req->isCacheMaintenance()) {
* @param mode Access type
* @param domain Domain type
*/
- virtual Fault translationCheck(RequestPtr req, bool is_priv,
+ virtual Fault translationCheck(const RequestPtr &req, bool is_priv,
BaseTLB::Mode mode,
TlbEntry::DomainType domain) = 0;
void insert(Addr vaddr, TlbEntry &pte);
- Fault getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
+ Fault getTE(TlbEntry **te, const RequestPtr &req,
+ ThreadContext *tc, Mode mode,
Translation *translation, bool timing, bool functional,
bool is_secure, ArmTranslationType tranType);
- Fault getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc,
- Mode mode, Translation *translation, bool timing,
+ Fault getResultTe(TlbEntry **te, const RequestPtr &req,
+ ThreadContext *tc, Mode mode,
+ Translation *translation, bool timing,
bool functional, TlbEntry *mergeTe);
- Fault checkPermissions(TlbEntry *te, RequestPtr req, Mode mode);
- Fault checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
+ Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode);
+ Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
ThreadContext *tc);
*/
void flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el);
- Fault trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain);
- Fault walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
- bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level);
+ Fault trickBoxCheck(const RequestPtr &req, Mode mode,
+ TlbEntry::DomainType domain);
+
+ Fault walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz,
+ bool is_exec, bool is_write,
+ TlbEntry::DomainType domain,
+ LookupLevel lookup_level);
void printTlb() const;
* Do a functional lookup on the TLB (for checker cpu) that
* behaves like a normal lookup without modifying any page table state.
*/
- Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
- ArmTranslationType tranType);
+ Fault translateFunctional(const RequestPtr &req, ThreadContext *tc,
+ Mode mode, ArmTranslationType tranType);
Fault
- translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode) override
+ translateFunctional(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) override
{
return translateFunctional(req, tc, mode, NormalTran);
}
return _attr;
}
- Fault translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
+ Fault translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
Translation *translation, bool &delay,
bool timing, ArmTranslationType tranType, bool functional = false);
- Fault translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
+ Fault translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
Translation *translation, bool &delay, bool timing);
- Fault translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
+ Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
ArmTranslationType tranType);
Fault
- translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode) override
+ translateAtomic(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) override
{
return translateAtomic(req, tc, mode, NormalTran);
}
void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode,
ArmTranslationType tranType);
void
- translateTiming(RequestPtr req, ThreadContext *tc,
+ translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override
{
translateTiming(req, tc, translation, mode, NormalTran);
}
- Fault translateComplete(RequestPtr req, ThreadContext *tc,
+ Fault translateComplete(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode, ArmTranslationType tranType,
bool callFromS2);
Fault finalizePhysical(
- RequestPtr req, ThreadContext *tc, Mode mode) const override;
+ const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const override;
void drainResume() override;
bool checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el);
public: /* Testing */
- Fault testTranslation(RequestPtr req, Mode mode,
+ Fault testTranslation(const RequestPtr &req, Mode mode,
TlbEntry::DomainType domain);
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
TlbEntry::DomainType domain,
parsingStarted(false), mismatch(false),
mismatchOnPcOrOpcode(false), parent(_parent)
{
+ memReq = std::make_shared<Request>();
}
void
TarmacParserRecord::readMemNoEffect(Addr addr, uint8_t *data, unsigned size,
unsigned flags)
{
- Request* req = &memReq;
+ const RequestPtr &req = memReq;
TheISA::TLB* dtb = static_cast<TLB*>(thread->getDTBPtr());
req->setVirt(0, addr, size, flags, thread->pcState().instAddr(),
bool mismatchOnPcOrOpcode;
/** Request for memory write checks. */
- Request memReq;
+ RequestPtr memReq;
protected:
TarmacParser& parent;
Fault fault;
// Set up a functional memory Request to pass to the TLB
// to get it to translate the vaddr to a paddr
- Request req(0, addr, 64, 0x40, -1, 0, 0);
+ auto req = std::make_shared<Request>(0, addr, 64, 0x40, -1, 0, 0);
ArmISA::TLB *tlb;
// Check the TLBs for a translation
// Calling translateFunctional invokes a table-walk if required
// so we should always succeed
tlb = static_cast<ArmISA::TLB*>(tc->getDTBPtr());
- fault = tlb->translateFunctional(&req, tc, BaseTLB::Read, TLB::NormalTran);
+ fault = tlb->translateFunctional(req, tc, BaseTLB::Read, TLB::NormalTran);
if (fault == NoFault)
- return std::make_pair(true, req.getPaddr());
+ return std::make_pair(true, req->getPaddr());
tlb = static_cast<ArmISA::TLB*>(tc->getITBPtr());
- fault = tlb->translateFunctional(&req, tc, BaseTLB::Read, TLB::NormalTran);
+ fault = tlb->translateFunctional(req, tc, BaseTLB::Read, TLB::NormalTran);
if (fault == NoFault)
- return std::make_pair(true, req.getPaddr());
+ return std::make_pair(true, req->getPaddr());
return std::make_pair(false, 0);
}
template <class XC>
inline void
-handleLockedRead(XC *xc, RequestPtr req)
+handleLockedRead(XC *xc, const RequestPtr &req)
{
}
template <class XC>
inline bool
-handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
{
return true;
}
#include "sim/process.hh"
Fault
-GenericTLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode)
+GenericTLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode)
{
if (FullSystem)
panic("Generic translation shouldn't be used in full system mode.\n");
}
void
-GenericTLB::translateTiming(RequestPtr req, ThreadContext *tc,
+GenericTLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode)
{
assert(translation);
}
Fault
-GenericTLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+GenericTLB::finalizePhysical(const RequestPtr &req, ThreadContext *tc,
+ Mode mode) const
{
return NoFault;
}
* be responsible for cleaning itself up which will happen in this
* function. Once it's called, the object is no longer valid.
*/
- virtual void finish(const Fault &fault, RequestPtr req,
+ virtual void finish(const Fault &fault, const RequestPtr &req,
ThreadContext *tc, Mode mode) = 0;
/** This function is used by the page table walker to determine if it
virtual void demapPage(Addr vaddr, uint64_t asn) = 0;
virtual Fault translateAtomic(
- RequestPtr req, ThreadContext *tc, Mode mode) = 0;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) = 0;
virtual void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) = 0;
virtual Fault
- translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
+ translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
panic("Not implemented.\n");
}
* @return A fault on failure, NoFault otherwise.
*/
virtual Fault finalizePhysical(
- RequestPtr req, ThreadContext *tc, Mode mode) const = 0;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) const = 0;
/**
* Remove all entries from the TLB
void demapPage(Addr vaddr, uint64_t asn) override;
Fault translateAtomic(
- RequestPtr req, ThreadContext *tc, Mode mode) override;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) override;
void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override;
Fault finalizePhysical(
- RequestPtr req, ThreadContext *tc, Mode mode) const override;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) const override;
};
#endif // __ARCH_GENERIC_TLB_HH__
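
Every consumer of a delayed translation implements this interface; Stage2LookUp, Stage2Translation, Fetch1::FetchRequest, and the Minor LSQRequest hunks elsewhere in this change all follow the same shape. A minimal illustrative subclass (doAccess is a hypothetical stand-in, not a gem5 call):

    class MyTranslation : public BaseTLB::Translation
    {
        void markDelayed() override {}

        void
        finish(const Fault &fault, const RequestPtr &req,
               ThreadContext *tc, BaseTLB::Mode mode) override
        {
            // The shared_ptr keeps req valid here even if whoever
            // created it has already been torn down.
            if (fault == NoFault)
                doAccess(req->getPaddr()); // doAccess: hypothetical
        }
    };
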
*d = gpuDynInst->wavefront()->ldsChunk->
read<c0>(vaddr);
} else {
- RequestPtr req = new Request(0,
+ RequestPtr req = std::make_shared<Request>(0,
vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
gpuDynInst->statusBitVector = VectorMask(1);
gpuDynInst->useContinuation = false;
// create request
- RequestPtr req = new Request(0, 0, 0, 0,
+ RequestPtr req = std::make_shared<Request>(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::ACQUIRE);
gpuDynInst->execContinuation = &GPUStaticInst::execSt;
gpuDynInst->useContinuation = true;
// create request
- RequestPtr req = new Request(0, 0, 0, 0,
+ RequestPtr req = std::make_shared<Request>(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::RELEASE);
gpuDynInst->wavefront()->ldsChunk->write<c0>(vaddr,
*d);
} else {
- RequestPtr req =
- new Request(0, vaddr, sizeof(c0), 0,
- gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId);
+ RequestPtr req = std::make_shared<Request>(
+ 0, vaddr, sizeof(c0), 0,
+ gpuDynInst->computeUnit()->masterId(),
+ 0, gpuDynInst->wfDynId);
gpuDynInst->setRequestFlags(req);
PacketPtr pkt = new Packet(req, MemCmd::WriteReq);
gpuDynInst->useContinuation = true;
// create request
- RequestPtr req = new Request(0, 0, 0, 0,
+ RequestPtr req = std::make_shared<Request>(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::RELEASE);
}
} else {
RequestPtr req =
- new Request(0, vaddr, sizeof(c0), 0,
+ std::make_shared<Request>(0, vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId,
gpuDynInst->makeAtomicOpFunctor<c0>(e,
// the acquire completes
gpuDynInst->useContinuation = false;
// create request
- RequestPtr req = new Request(0, 0, 0, 0,
+ RequestPtr req = std::make_shared<Request>(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::ACQUIRE);
template <class XC>
inline void
-handleLockedRead(XC *xc, RequestPtr req)
+handleLockedRead(XC *xc, const RequestPtr &req)
{
xc->setMiscReg(MISCREG_LLADDR, req->getPaddr() & ~0xf);
xc->setMiscReg(MISCREG_LLFLAG, true);
template <class XC>
inline bool
-handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
{
if (req->isUncacheable()) {
// Funky Turbolaser mailbox access...don't update
}
inline Fault
-TLB::checkCacheability(RequestPtr &req)
+TLB::checkCacheability(const RequestPtr &req)
{
Addr VAddrUncacheable = 0xA0000000;
// In MIPS, cacheability is controlled by certain bits of the virtual
}
Fault
-TLB::translateInst(RequestPtr req, ThreadContext *tc)
+TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
{
if (FullSystem)
panic("translateInst not implemented in MIPS.\n");
}
Fault
-TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
+TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
{
if (FullSystem)
panic("translateData not implemented in MIPS.\n");
}
Fault
-TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
+TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
if (mode == Execute)
return translateInst(req, tc);
}
void
-TLB::translateTiming(RequestPtr req, ThreadContext *tc,
+TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode)
{
assert(translation);
}
Fault
-TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+TLB::finalizePhysical(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const
{
return NoFault;
}
// static helper functions... really
static bool validVirtualAddress(Addr vaddr);
- static Fault checkCacheability(RequestPtr &req);
+ static Fault checkCacheability(const RequestPtr &req);
// Checkpointing
void serialize(CheckpointOut &cp) const override;
void regStats() override;
Fault translateAtomic(
- RequestPtr req, ThreadContext *tc, Mode mode) override;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) override;
void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override;
Fault finalizePhysical(
- RequestPtr req, ThreadContext *tc, Mode mode) const override;
+ const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const override;
private:
- Fault translateInst(RequestPtr req, ThreadContext *tc);
- Fault translateData(RequestPtr req, ThreadContext *tc, bool write);
+ Fault translateInst(const RequestPtr &req, ThreadContext *tc);
+ Fault translateData(const RequestPtr &req, ThreadContext *tc, bool write);
};
}
}
inline Fault
-TLB::checkCacheability(RequestPtr &req)
+TLB::checkCacheability(const RequestPtr &req)
{
Addr VAddrUncacheable = 0xA0000000;
if ((req->getVaddr() & VAddrUncacheable) == VAddrUncacheable) {
}
Fault
-TLB::translateInst(RequestPtr req, ThreadContext *tc)
+TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
{
// Instruction accesses must be word-aligned
if (req->getVaddr() & 0x3) {
}
Fault
-TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
+TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
{
Process * p = tc->getProcessPtr();
}
Fault
-TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
+TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
if (FullSystem)
fatal("translate atomic not yet implemented in full system mode.\n");
}
void
-TLB::translateTiming(RequestPtr req, ThreadContext *tc,
+TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode)
{
assert(translation);
}
Fault
-TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+TLB::finalizePhysical(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const
{
return NoFault;
}
// static helper functions... really
static bool validVirtualAddress(Addr vaddr);
- static Fault checkCacheability(RequestPtr &req);
- Fault translateInst(RequestPtr req, ThreadContext *tc);
- Fault translateData(RequestPtr req, ThreadContext *tc, bool write);
+ static Fault checkCacheability(const RequestPtr &req);
+ Fault translateInst(const RequestPtr &req, ThreadContext *tc);
+ Fault translateData(const RequestPtr &req, ThreadContext *tc, bool write);
Fault translateAtomic(
- RequestPtr req, ThreadContext *tc, Mode mode) override;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) override;
void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override;
Fault finalizePhysical(
- RequestPtr req, ThreadContext *tc, Mode mode) const override;
+ const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const override;
// Checkpointing
void serialize(CheckpointOut &cp) const override;
template <class XC> inline void
-handleLockedRead(XC *xc, RequestPtr req)
+handleLockedRead(XC *xc, const RequestPtr &req)
{
locked_addrs.push(req->getPaddr() & ~0xF);
DPRINTF(LLSC, "[cid:%d]: Reserved address %x.\n",
{}
template <class XC> inline bool
-handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
{
// Normally RISC-V uses zero to indicate success and nonzero to indicate
// failure (right now only 1 is reserved), but in gem5 zero indicates
}
inline Fault
-TLB::checkCacheability(RequestPtr &req)
+TLB::checkCacheability(const RequestPtr &req)
{
Addr VAddrUncacheable = 0xA0000000;
// In MIPS, cacheability is controlled by certain bits of the virtual
}
Fault
-TLB::translateInst(RequestPtr req, ThreadContext *tc)
+TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
{
if (FullSystem)
panic("translateInst not implemented in RISC-V.\n");
}
Fault
-TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
+TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
{
if (FullSystem)
panic("translateData not implemented in RISC-V.\n");
}
Fault
-TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
+TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
if (mode == Execute)
return translateInst(req, tc);
}
void
-TLB::translateTiming(RequestPtr req, ThreadContext *tc,
+TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode)
{
assert(translation);
}
Fault
-TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+TLB::finalizePhysical(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const
{
return NoFault;
}
// static helper functions... really
static bool validVirtualAddress(Addr vaddr);
- static Fault checkCacheability(RequestPtr &req);
+ static Fault checkCacheability(const RequestPtr &req);
// Checkpointing
void serialize(CheckpointOut &cp) const override;
void regStats() override;
Fault translateAtomic(
- RequestPtr req, ThreadContext *tc, Mode mode) override;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) override;
void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override;
Fault finalizePhysical(
- RequestPtr req, ThreadContext *tc, Mode mode) const override;
+ const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const override;
private:
- Fault translateInst(RequestPtr req, ThreadContext *tc);
- Fault translateData(RequestPtr req, ThreadContext *tc, bool write);
+ Fault translateInst(const RequestPtr &req, ThreadContext *tc);
+ Fault translateData(const RequestPtr &req, ThreadContext *tc, bool write);
};
}
}
Fault
-TLB::translateInst(RequestPtr req, ThreadContext *tc)
+TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
{
uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
}
Fault
-TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
+TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
{
/*
* @todo this could really use some profiling and fixing to make
};
Fault
-TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
+TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
if (mode == Execute)
return translateInst(req, tc);
}
void
-TLB::translateTiming(RequestPtr req, ThreadContext *tc,
+TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode)
{
assert(translation);
}
Fault
-TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+TLB::finalizePhysical(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const
{
return NoFault;
}
void writeTagAccess(Addr va, int context);
- Fault translateInst(RequestPtr req, ThreadContext *tc);
- Fault translateData(RequestPtr req, ThreadContext *tc, bool write);
+ Fault translateInst(const RequestPtr &req, ThreadContext *tc);
+ Fault translateData(const RequestPtr &req, ThreadContext *tc, bool write);
public:
typedef SparcTLBParams Params;
void dumpAll();
Fault translateAtomic(
- RequestPtr req, ThreadContext *tc, Mode mode) override;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) override;
void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override;
Fault finalizePhysical(
- RequestPtr req, ThreadContext *tc, Mode mode) const override;
+ const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const override;
Cycles doMmuRegRead(ThreadContext *tc, Packet *pkt);
Cycles doMmuRegWrite(ThreadContext *tc, Packet *pkt);
void GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs);
static inline PacketPtr
prepIntRequest(const uint8_t id, Addr offset, Addr size)
{
- RequestPtr req = new Request(x86InterruptAddress(id, offset),
- size, Request::UNCACHEABLE,
- Request::intMasterId);
+ RequestPtr req = std::make_shared<Request>(
+ x86InterruptAddress(id, offset),
+ size, Request::UNCACHEABLE,
+ Request::intMasterId);
+
PacketPtr pkt = new Packet(req, MemCmd::MessageReq);
pkt->allocate();
return pkt;
Fault
Walker::start(ThreadContext * _tc, BaseTLB::Translation *_translation,
- RequestPtr _req, BaseTLB::Mode _mode)
+ const RequestPtr &_req, BaseTLB::Mode _mode)
{
// TODO: in timing mode, instead of blocking when there are other
// outstanding requests, see if this request can be coalesced with
//If we didn't return, we're setting up another read.
Request::Flags flags = oldRead->req->getFlags();
flags.set(Request::UNCACHEABLE, uncacheable);
- RequestPtr request =
- new Request(nextRead, oldRead->getSize(), flags, walker->masterId);
+ RequestPtr request = std::make_shared<Request>(
+ nextRead, oldRead->getSize(), flags, walker->masterId);
read = new Packet(request, MemCmd::ReadReq);
read->allocate();
// If we need to write, adjust the read packet to write the modified
write->cmd = MemCmd::WriteReq;
} else {
write = NULL;
- delete oldRead->req;
delete oldRead;
}
}
Walker::WalkerState::endWalk()
{
nextState = Ready;
- delete read->req;
delete read;
read = NULL;
}
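
Both deletions rely on Packet now holding its request as a RequestPtr: destroying the packet merely drops one reference. The resulting idiom, sketched under the assumption that nothing else retains the request:

    RequestPtr req = std::make_shared<Request>(addr, size, flags, masterId);
    PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
    pkt->allocate();
    // ... issue the packet ...
    delete pkt; // drops the packet's reference; the Request frees
                // itself once the local RequestPtr goes away
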
Request::Flags flags = Request::PHYSICAL;
if (cr3.pcd)
flags.set(Request::UNCACHEABLE);
- RequestPtr request = new Request(topAddr, dataSize, flags,
- walker->masterId);
+
+ RequestPtr request = std::make_shared<Request>(
+ topAddr, dataSize, flags, walker->masterId);
+
read = new Packet(request, MemCmd::ReadReq);
read->allocate();
}
bool started;
public:
WalkerState(Walker * _walker, BaseTLB::Translation *_translation,
- RequestPtr _req, bool _isFunctional = false) :
- walker(_walker), req(_req), state(Ready),
- nextState(Ready), inflight(0),
- translation(_translation),
- functional(_isFunctional), timing(false),
- retrying(false), started(false)
+ const RequestPtr &_req, bool _isFunctional = false) :
+ walker(_walker), req(_req), state(Ready),
+ nextState(Ready), inflight(0),
+ translation(_translation),
+ functional(_isFunctional), timing(false),
+ retrying(false), started(false)
{
}
void initState(ThreadContext * _tc, BaseTLB::Mode _mode,
public:
// Kick off the state machine.
Fault start(ThreadContext * _tc, BaseTLB::Translation *translation,
- RequestPtr req, BaseTLB::Mode mode);
+ const RequestPtr &req, BaseTLB::Mode mode);
Fault startFunctional(ThreadContext * _tc, Addr &addr,
unsigned &logBytes, BaseTLB::Mode mode);
BaseMasterPort &getMasterPort(const std::string &if_name,
}
Fault
-TLB::translateInt(RequestPtr req, ThreadContext *tc)
+TLB::translateInt(const RequestPtr &req, ThreadContext *tc)
{
DPRINTF(TLB, "Addresses references internal memory.\n");
Addr vaddr = req->getVaddr();
}
Fault
-TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+TLB::finalizePhysical(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const
{
Addr paddr = req->getPaddr();
}
Fault
-TLB::translate(RequestPtr req, ThreadContext *tc, Translation *translation,
+TLB::translate(const RequestPtr &req,
+ ThreadContext *tc, Translation *translation,
Mode mode, bool &delayedResponse, bool timing)
{
Request::Flags flags = req->getFlags();
}
Fault
-TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
+TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
bool delayedResponse;
return TLB::translate(req, tc, NULL, mode, delayedResponse, false);
}
void
-TLB::translateTiming(RequestPtr req, ThreadContext *tc,
+TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode)
{
bool delayedResponse;
Stats::Scalar rdMisses;
Stats::Scalar wrMisses;
- Fault translateInt(RequestPtr req, ThreadContext *tc);
+ Fault translateInt(const RequestPtr &req, ThreadContext *tc);
- Fault translate(RequestPtr req, ThreadContext *tc,
+ Fault translate(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode,
bool &delayedResponse, bool timing);
}
Fault translateAtomic(
- RequestPtr req, ThreadContext *tc, Mode mode) override;
+ const RequestPtr &req, ThreadContext *tc, Mode mode) override;
void translateTiming(
- RequestPtr req, ThreadContext *tc,
+ const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override;
/**
* @param mode Request type (read/write/execute).
* @return A fault on failure, NoFault otherwise.
*/
- Fault finalizePhysical(RequestPtr req, ThreadContext *tc,
+ Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc,
Mode mode) const override;
TlbEntry *insert(Addr vpn, const TlbEntry &entry);
assert(tid < numThreads);
AddressMonitor &monitor = addressMonitor[tid];
- Request req;
+ RequestPtr req = std::make_shared<Request>();
Addr addr = monitor.vAddr;
int block_size = cacheLineSize();
uint64_t mask = ~((uint64_t)(block_size - 1));
if (secondAddr > addr)
size = secondAddr - addr;
- req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());
+ req->setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());
// translate to physical address
- Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
+ Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
assert(fault == NoFault);
- monitor.pAddr = req.getPaddr() & mask;
+ monitor.pAddr = req->getPaddr() & mask;
monitor.waiting = true;
DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
Request::Flags flags, uint64_t *res);
/** Splits a request in two if it crosses a dcache block. */
- void splitRequest(RequestPtr req, RequestPtr &sreqLow,
+ void splitRequest(const RequestPtr &req, RequestPtr &sreqLow,
RequestPtr &sreqHigh);
/** Initiate a DTB address translation. */
- void initiateTranslation(RequestPtr req, RequestPtr sreqLow,
- RequestPtr sreqHigh, uint64_t *res,
+ void initiateTranslation(const RequestPtr &req, const RequestPtr &sreqLow,
+ const RequestPtr &sreqHigh, uint64_t *res,
BaseTLB::Mode mode);
/** Finish a DTB address translation. */
sreqLow = savedSreqLow;
sreqHigh = savedSreqHigh;
} else {
- req = new Request(asid, addr, size, flags, masterId(), this->pc.instAddr(),
- thread->contextId());
+ req = std::make_shared<Request>(
+ asid, addr, size, flags, masterId(),
+ this->pc.instAddr(), thread->contextId());
req->taskId(cpu->taskId());
instFlags[EffAddrValid] = true;
if (cpu->checker) {
- if (reqToVerify != NULL) {
- delete reqToVerify;
- }
- reqToVerify = new Request(*req);
+ reqToVerify = std::make_shared<Request>(*req);
}
fault = cpu->read(req, sreqLow, sreqHigh, lqIdx);
} else {
sreqLow = savedSreqLow;
sreqHigh = savedSreqHigh;
} else {
- req = new Request(asid, addr, size, flags, masterId(), this->pc.instAddr(),
- thread->contextId());
+ req = std::make_shared<Request>(
+ asid, addr, size, flags, masterId(),
+ this->pc.instAddr(), thread->contextId());
req->taskId(cpu->taskId());
instFlags[EffAddrValid] = true;
if (cpu->checker) {
- if (reqToVerify != NULL) {
- delete reqToVerify;
- }
- reqToVerify = new Request(*req);
+ reqToVerify = std::make_shared<Request>(*req);
}
fault = cpu->write(req, sreqLow, sreqHigh, data, sqIdx);
}
template<class Impl>
inline void
-BaseDynInst<Impl>::splitRequest(RequestPtr req, RequestPtr &sreqLow,
+BaseDynInst<Impl>::splitRequest(const RequestPtr &req, RequestPtr &sreqLow,
RequestPtr &sreqHigh)
{
// Check to see if the request crosses the next level block boundary.
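
The (truncated) body decides whether the access straddles a block boundary and, if so, carves the one request into two shared sub-requests. A sketch of the split itself, assuming Request::splitOnVaddr keeps its existing shape and now fills in shared pointers:

    if (split_addr > req->getVaddr()) { // split_addr: illustrative
        assert(split_addr - req->getVaddr() < req->getSize());
        req->splitOnVaddr(split_addr, sreqLow, sreqHigh);
    }
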
template<class Impl>
inline void
-BaseDynInst<Impl>::initiateTranslation(RequestPtr req, RequestPtr sreqLow,
- RequestPtr sreqHigh, uint64_t *res,
+BaseDynInst<Impl>::initiateTranslation(const RequestPtr &req,
+ const RequestPtr &sreqLow,
+ const RequestPtr &sreqHigh,
+ uint64_t *res,
BaseTLB::Mode mode)
{
translationStarted(true);
cpu->snList.insert(seqNum);
#endif
- reqToVerify = NULL;
}
template <class Impl>
cpu->snList.erase(seqNum);
#endif
- if (reqToVerify)
- delete reqToVerify;
}
#ifdef DEBUG
: BaseCPU(p, true), systemPtr(NULL), icachePort(NULL), dcachePort(NULL),
tc(NULL), thread(NULL)
{
- memReq = NULL;
curStaticInst = NULL;
curMacroStaticInst = NULL;
// Need to account for multiple accesses like the Atomic and TimingSimple
while (1) {
- memReq = new Request(0, addr, size, flags, masterId,
- thread->pcState().instAddr(), tc->contextId());
+ auto mem_req = std::make_shared<Request>(
+ 0, addr, size, flags, masterId,
+ thread->pcState().instAddr(), tc->contextId());
// translate to physical address
- fault = dtb->translateFunctional(memReq, tc, BaseTLB::Read);
+ fault = dtb->translateFunctional(mem_req, tc, BaseTLB::Read);
if (!checked_flags && fault == NoFault && unverifiedReq) {
- flags_match = checkFlags(unverifiedReq, memReq->getVaddr(),
- memReq->getPaddr(), memReq->getFlags());
- pAddr = memReq->getPaddr();
+ flags_match = checkFlags(unverifiedReq, mem_req->getVaddr(),
+ mem_req->getPaddr(), mem_req->getFlags());
+ pAddr = mem_req->getPaddr();
checked_flags = true;
}
// Now do the access
if (fault == NoFault &&
- !memReq->getFlags().isSet(Request::NO_ACCESS)) {
- PacketPtr pkt = Packet::createRead(memReq);
+ !mem_req->getFlags().isSet(Request::NO_ACCESS)) {
+ PacketPtr pkt = Packet::createRead(mem_req);
pkt->dataStatic(data);
- if (!(memReq->isUncacheable() || memReq->isMmappedIpr())) {
+ if (!(mem_req->isUncacheable() || mem_req->isMmappedIpr())) {
// Access memory to see if we have the same data
dcachePort->sendFunctional(pkt);
} else {
memcpy(data, unverifiedMemData, size);
}
- delete memReq;
- memReq = NULL;
delete pkt;
}
if (fault != NoFault) {
- if (memReq->isPrefetch()) {
+ if (mem_req->isPrefetch()) {
fault = NoFault;
}
- delete memReq;
- memReq = NULL;
break;
}
- if (memReq != NULL) {
- delete memReq;
- }
-
//If we don't need to access a second cache line, stop now.
if (secondAddr <= addr)
{
// Need to account for multiple accesses, like the Atomic and Timing CPUs
while (1) {
- memReq = new Request(0, addr, size, flags, masterId,
- thread->pcState().instAddr(), tc->contextId());
+ auto mem_req = std::make_shared<Request>(
+ 0, addr, size, flags, masterId,
+ thread->pcState().instAddr(), tc->contextId());
// translate to physical address
- fault = dtb->translateFunctional(memReq, tc, BaseTLB::Write);
+ fault = dtb->translateFunctional(mem_req, tc, BaseTLB::Write);
if (!checked_flags && fault == NoFault && unverifiedReq) {
- flags_match = checkFlags(unverifiedReq, memReq->getVaddr(),
- memReq->getPaddr(), memReq->getFlags());
- pAddr = memReq->getPaddr();
+ flags_match = checkFlags(unverifiedReq, mem_req->getVaddr(),
+ mem_req->getPaddr(), mem_req->getFlags());
+ pAddr = mem_req->getPaddr();
checked_flags = true;
}
* enabled. This is left as future work for the Checker: LSQ snooping
* and memory validation after stores have committed.
*/
- bool was_prefetch = memReq->isPrefetch();
-
- delete memReq;
+ bool was_prefetch = mem_req->isPrefetch();
//If we don't need to access a second cache line, stop now.
if (fault != NoFault || secondAddr <= addr)
* Checks if the flags set by the Checker and Checkee match.
*/
bool
-CheckerCPU::checkFlags(RequestPtr unverified_req, Addr vAddr,
+CheckerCPU::checkFlags(const RequestPtr &unverified_req, Addr vAddr,
Addr pAddr, int flags)
{
Addr unverifiedVAddr = unverified_req->getVaddr();
// keep them all in a std::queue
std::queue<InstResult> result;
- // Pointer to the one memory request.
- RequestPtr memReq;
-
StaticInstPtr curStaticInst;
StaticInstPtr curMacroStaticInst;
dumpAndExit();
}
- bool checkFlags(RequestPtr unverified_req, Addr vAddr,
+ bool checkFlags(const RequestPtr &unverified_req, Addr vAddr,
Addr pAddr, int flags);
void dumpAndExit();
// If not in the middle of a macro instruction
if (!curMacroStaticInst) {
// set up memory request for instruction fetch
- memReq = new Request(unverifiedInst->threadNumber, fetch_PC,
- sizeof(MachInst),
- 0,
- masterId,
- fetch_PC, thread->contextId());
- memReq->setVirt(0, fetch_PC, sizeof(MachInst),
- Request::INST_FETCH, masterId, thread->instAddr());
+ auto mem_req = std::make_shared<Request>(
+ unverifiedInst->threadNumber, fetch_PC,
+ sizeof(MachInst), 0, masterId, fetch_PC,
+ thread->contextId());
+ mem_req->setVirt(0, fetch_PC, sizeof(MachInst),
+ Request::INST_FETCH, masterId,
+ thread->instAddr());
- fault = itb->translateFunctional(memReq, tc, BaseTLB::Execute);
+ fault = itb->translateFunctional(
+ mem_req, tc, BaseTLB::Execute);
if (fault != NoFault) {
if (unverifiedInst->getFault() == NoFault) {
advancePC(NoFault);
// Give up on an ITB fault..
- delete memReq;
unverifiedInst = NULL;
return;
} else {
// the fault and see if our results match the CPU on
// the next tick().
fault = unverifiedInst->getFault();
- delete memReq;
break;
}
} else {
- PacketPtr pkt = new Packet(memReq, MemCmd::ReadReq);
+ PacketPtr pkt = new Packet(mem_req, MemCmd::ReadReq);
pkt->dataStatic(&machInst);
icachePort->sendFunctional(pkt);
machInst = gtoh(machInst);
- delete memReq;
delete pkt;
}
}
{
if (cpu->system->isAtomicMode()) {
Tick delay = sendAtomic(pkt);
- delete pkt->req;
delete pkt;
return delay;
} else {
{
DPRINTF(KvmIO, "KVM: Finished timing request\n");
- delete pkt->req;
delete pkt;
activeMMIOReqs--;
ThreadContext *tc(thread->getTC());
syncThreadContext();
- RequestPtr mmio_req = new Request(paddr, size, Request::UNCACHEABLE,
- dataMasterId());
+ RequestPtr mmio_req = std::make_shared<Request>(
+ paddr, size, Request::UNCACHEABLE, dataMasterId());
+
mmio_req->setContext(tc->contextId());
// Some architectures do need to massage physical addresses a bit
// before they are inserted into the memory system. This enables
TheISA::handleIprWrite(tc, pkt) :
TheISA::handleIprRead(tc, pkt));
threadContextDirty = true;
- delete pkt->req;
delete pkt;
return clockPeriod() * ipr_delay;
} else {
// prevent races in multi-core mode.
EventQueue::ScopedMigration migrate(deviceEventQueue());
for (int i = 0; i < count; ++i) {
- RequestPtr io_req = new Request(pAddr, kvm_run.io.size,
- Request::UNCACHEABLE, dataMasterId());
+ RequestPtr io_req = std::make_shared<Request>(
+ pAddr, kvm_run.io.size,
+ Request::UNCACHEABLE, dataMasterId());
+
io_req->setContext(tc->contextId());
PacketPtr pkt = new Packet(io_req, cmd);
"%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
request_id, aligned_pc, thread.pc, line_offset, request_size);
- request->request.setContext(cpu.threads[tid]->getTC()->contextId());
- request->request.setVirt(0 /* asid */,
+ request->request->setContext(cpu.threads[tid]->getTC()->contextId());
+ request->request->setVirt(0 /* asid */,
aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
/* I've no idea why we need the PC, but give it */
thread.pc.instAddr());
* through finish/markDelayed on this request as it bears
* the Translation interface */
cpu.threads[request->id.threadId]->itb->translateTiming(
- &request->request,
+ request->request,
cpu.getContext(request->id.threadId),
request, BaseTLB::Execute);
Fetch1::FetchRequest::makePacket()
{
/* Make the necessary packet for a memory transaction */
- packet = new Packet(&request, MemCmd::ReadReq);
+ packet = new Packet(request, MemCmd::ReadReq);
packet->allocate();
/* This FetchRequest becomes SenderState to allow the response to be
}
void
-Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
+Fetch1::FetchRequest::finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode)
{
fault = fault_;
DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
"paddr: 0x%x, vaddr: 0x%x\n",
response->fault->name(),
- (response->request.hasPaddr() ? response->request.getPaddr() : 0),
- response->request.getVaddr());
+ (response->request->hasPaddr() ?
+ response->request->getPaddr() : 0),
+ response->request->getVaddr());
if (DTRACE(MinorTrace))
minorTraceResponseLine(name(), response);
Fetch1::minorTraceResponseLine(const std::string &name,
Fetch1::FetchRequestPtr response) const
{
- Request &request M5_VAR_USED = response->request;
+ const RequestPtr &request M5_VAR_USED = response->request;
if (response->packet && response->packet->isError()) {
MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
- response->id, request.getVaddr());
+ response->id, request->getVaddr());
} else if (response->fault != NoFault) {
MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
- response->id, request.getVaddr(), response->fault->name());
+ response->id, request->getVaddr(), response->fault->name());
} else {
MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
- response->id, request.getSize(),
- request.getVaddr(), request.getPaddr());
+ response->id, request->getSize(),
+ request->getVaddr(), request->getPaddr());
}
}
line.pc = response->pc;
/* Set the lineBase, which is a sizeof(MachInst) aligned address <=
* pc.instAddr() */
- line.lineBaseAddr = response->request.getVaddr();
+ line.lineBaseAddr = response->request->getVaddr();
if (response->fault != NoFault) {
/* Stop fetching if there was a fault */
PacketPtr packet;
/** The underlying request that this fetch represents */
- Request request;
+ RequestPtr request;
/** PC to fixup with line address */
TheISA::PCState pc;
/** Interface for ITLB responses. Populates self and then passes
* the request on to the ports' handleTLBResponse member
* function */
- void finish(const Fault &fault_, RequestPtr request_,
+ void finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode);
public:
request(),
pc(pc_),
fault(NoFault)
- { }
+ {
+ request = std::make_shared<Request>();
+ }
~FetchRequest();
};
skipped(false),
issuedToMemory(false),
state(NotIssued)
-{ }
+{
+ request = std::make_shared<Request>();
+}
LSQ::AddrRangeCoverage
LSQ::LSQRequest::containsAddrRangeOf(
LSQ::AddrRangeCoverage
LSQ::LSQRequest::containsAddrRangeOf(LSQRequestPtr other_request)
{
- return containsAddrRangeOf(request.getPaddr(), request.getSize(),
- other_request->request.getPaddr(), other_request->request.getSize());
+ return containsAddrRangeOf(request->getPaddr(), request->getSize(),
+ other_request->request->getPaddr(), other_request->request->getSize());
}
bool
}
void
-LSQ::SingleDataRequest::finish(const Fault &fault_, RequestPtr request_,
+LSQ::SingleDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode)
{
fault = fault_;
* finish/markDelayed on the LSQRequest as it bears the Translation
* interface */
thread->getDTBPtr()->translateTiming(
- &request, thread, this, (isLoad ? BaseTLB::Read : BaseTLB::Write));
+ request, thread, this, (isLoad ? BaseTLB::Read : BaseTLB::Write));
}
void
}
void
-LSQ::SplitDataRequest::finish(const Fault &fault_, RequestPtr request_,
+LSQ::SplitDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode)
{
fault = fault_;
LSQ::SplitDataRequest::~SplitDataRequest()
{
- for (auto i = fragmentRequests.begin();
- i != fragmentRequests.end(); i++)
- {
- delete *i;
- }
-
for (auto i = fragmentPackets.begin();
i != fragmentPackets.end(); i++)
{
void
LSQ::SplitDataRequest::makeFragmentRequests()
{
- Addr base_addr = request.getVaddr();
- unsigned int whole_size = request.getSize();
+ Addr base_addr = request->getVaddr();
+ unsigned int whole_size = request->getSize();
unsigned int line_width = port.lineWidth;
unsigned int fragment_size;
}
}
- RequestPtr fragment = new Request();
+ RequestPtr fragment = std::make_shared<Request>();
- fragment->setContext(request.contextId());
+ fragment->setContext(request->contextId());
fragment->setVirt(0 /* asid */,
- fragment_addr, fragment_size, request.getFlags(),
- request.masterId(),
- request.getPC());
+ fragment_addr, fragment_size, request->getFlags(),
+ request->masterId(),
+ request->getPC());
DPRINTFS(MinorMem, (&port), "Generating fragment addr: 0x%x size: %d"
" (whole request addr: 0x%x size: %d) %s\n",
void
LSQ::SplitDataRequest::makeFragmentPackets()
{
- Addr base_addr = request.getVaddr();
+ Addr base_addr = request->getVaddr();
DPRINTFS(MinorMem, (&port), "Making packets for request: %s\n", *inst);
assert(fragment->hasPaddr());
PacketPtr fragment_packet =
- makePacketForRequest(*fragment, isLoad, this, request_data);
+ makePacketForRequest(fragment, isLoad, this, request_data);
fragmentPackets.push_back(fragment_packet);
/* Accumulate flags in parent request */
- request.setFlags(fragment->getFlags());
+ request->setFlags(fragment->getFlags());
}
/* Might as well make the overall/response packet here */
/* Get the physical address for the whole request/packet from the first
* fragment */
- request.setPaddr(fragmentRequests[0]->getPaddr());
+ request->setPaddr(fragmentRequests[0]->getPaddr());
makePacket();
}
DPRINTFS(MinorMem, (&port), "Retiring fragment addr: 0x%x size: %d"
" offset: 0x%x (retired fragment num: %d) %s\n",
response->req->getVaddr(), response->req->getSize(),
- request.getVaddr() - response->req->getVaddr(),
+ request->getVaddr() - response->req->getVaddr(),
numRetiredFragments,
(fault == NoFault ? "" : fault->name()));
/* For a split transfer, a Packet must be constructed
* to contain all returning data. This is that packet's
* data */
- data = new uint8_t[request.getSize()];
+ data = new uint8_t[request->getSize()];
}
/* Populate the portion of the overall response data represented
* by the response fragment */
std::memcpy(
- data + (response->req->getVaddr() - request.getVaddr()),
+ data + (response->req->getVaddr() - request->getVaddr()),
response->getConstPtr<uint8_t>(),
response->req->getSize());
}
DPRINTFS(MinorMem, (&port), "Retired packet isRead: %d isWrite: %d"
" needsResponse: %d packetSize: %s requestSize: %s responseSize:"
" %s\n", packet->isRead(), packet->isWrite(),
- packet->needsResponse(), packet->getSize(), request.getSize(),
+ packet->needsResponse(), packet->getSize(), request->getSize(),
response->getSize());
/* A request can become complete by several paths, this is a sanity
* check to make sure the packet's data is created */
if (!data) {
- data = new uint8_t[request.getSize()];
+ data = new uint8_t[request->getSize()];
}
if (isLoad) {
DPRINTFS(MinorMem, (&port), "Copying read data\n");
- std::memcpy(packet->getPtr<uint8_t>(), data, request.getSize());
+ std::memcpy(packet->getPtr<uint8_t>(), data, request->getSize());
}
packet->makeResponse();
}
DPRINTF(MinorMem, "Forwarding: slot: %d result: %s thisAddr:"
" 0x%x thisSize: %d slotAddr: 0x%x slotSize: %d\n",
slot_index, coverage,
- request->request.getPaddr(), request->request.getSize(),
- slot->request.getPaddr(), slot->request.getSize());
+ request->request->getPaddr(), request->request->getSize(),
+ slot->request->getPaddr(), slot->request->getSize());
found_slot = slot_index;
ret = coverage;
assert(store->packet);
assert(store->containsAddrRangeOf(load) == FullAddrRangeCoverage);
- Addr load_addr = load->request.getPaddr();
- Addr store_addr = store->request.getPaddr();
+ Addr load_addr = load->request->getPaddr();
+ Addr store_addr = store->request->getPaddr();
Addr addr_offset = load_addr - store_addr;
- unsigned int load_size = load->request.getSize();
+ unsigned int load_size = load->request->getSize();
DPRINTF(MinorMem, "Forwarding %d bytes for addr: 0x%x from store buffer"
" slot: %d addr: 0x%x addressOffset: 0x%x\n",
}
bool is_load = request->isLoad;
- bool is_llsc = request->request.isLLSC();
- bool is_swap = request->request.isSwap();
- bool bufferable = !(request->request.isStrictlyOrdered() ||
+ bool is_llsc = request->request->isLLSC();
+ bool is_swap = request->request->isSwap();
+ bool bufferable = !(request->request->isStrictlyOrdered() ||
is_llsc || is_swap);
if (is_load) {
}
} else {
/* Store. Can it be sent to the store buffer? */
- if (bufferable && !request->request.isMmappedIpr()) {
+ if (bufferable && !request->request->isMmappedIpr()) {
request->setState(LSQRequest::StoreToStoreBuffer);
moveFromRequestsToTransfers(request);
DPRINTF(MinorMem, "Moving store into transfers queue\n");
/* Handle LLSC requests and tests */
if (is_load) {
- TheISA::handleLockedRead(&context, &request->request);
+ TheISA::handleLockedRead(&context, request->request);
} else {
do_access = TheISA::handleLockedWrite(&context,
- &request->request, cacheBlockMask);
+ request->request, cacheBlockMask);
if (!do_access) {
DPRINTF(MinorMem, "Not perfoming a memory "
* so the response can be correctly handled */
assert(packet->findNextSenderState<LSQRequest>());
- if (request->request.isMmappedIpr()) {
+ if (request->request->isMmappedIpr()) {
ThreadContext *thread =
cpu.getContext(cpu.contextToThread(
- request->request.contextId()));
+ request->request->contextId()));
if (request->isLoad) {
DPRINTF(MinorMem, "IPR read inst: %s\n", *(request->inst));
inst->traceData->setMem(addr, size, flags);
int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
- request->request.setContext(cid);
- request->request.setVirt(0 /* asid */,
+ request->request->setContext(cid);
+ request->request->setVirt(0 /* asid */,
addr, size, flags, cpu.dataMasterId(),
/* I've no idea why we need the PC, but give it */
inst->pc.instAddr());
}
PacketPtr
-makePacketForRequest(Request &request, bool isLoad,
+makePacketForRequest(const RequestPtr &request, bool isLoad,
Packet::SenderState *sender_state, PacketDataPtr data)
{
- PacketPtr ret = isLoad ? Packet::createRead(&request)
- : Packet::createWrite(&request);
+ PacketPtr ret = isLoad ? Packet::createRead(request)
+ : Packet::createWrite(request);
if (sender_state)
ret->pushSenderState(sender_state);
if (isLoad) {
ret->allocate();
- } else if (!request.isCacheMaintenance()) {
+ } else if (!request->isCacheMaintenance()) {
// CMOs are treated as stores but they don't have data. All
// stores otherwise need to allocate for data.
ret->dataDynamic(data);
PacketPtr packet;
/** The underlying request of this LSQRequest */
- Request request;
+ RequestPtr request;
/** Fault generated performing this request */
Fault fault;
{
protected:
/** TLB interface */
- void finish(const Fault &fault_, RequestPtr request_,
+ void finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode)
{ }
{
protected:
/** TLB interface */
- void finish(const Fault &fault_, RequestPtr request_,
+ void finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode);
/** Has my only packet been sent to the memory system but has not
protected:
/** TLB response interface */
- void finish(const Fault &fault_, RequestPtr request_,
+ void finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode);
public:
/** Make a suitable packet for the given request. If the request is a store,
* data will be the payload data. If sender_state is NULL, it won't be
* pushed into the packet as senderState */
-PacketPtr makePacketForRequest(Request &request, bool isLoad,
+PacketPtr makePacketForRequest(const RequestPtr &request, bool isLoad,
Packet::SenderState *sender_state = NULL, PacketDataPtr data = NULL);
}
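Throughout the change, Request arguments are passed as const RequestPtr & rather than by value; this preserves shared-ownership semantics at the callee without the atomic reference-count traffic a by-value shared_ptr copy would incur. A hedged sketch with hypothetical function names:

#include <memory>

struct Request { unsigned size = 0; };
using RequestPtr = std::shared_ptr<Request>;

// By const reference: no refcount change on entry or exit.
unsigned sizeOf(const RequestPtr &req) { return req->size; }

// By value: an atomic increment on entry and a decrement on exit,
// wasted work when the callee never stores the pointer.
unsigned sizeOfCopy(RequestPtr req) { return req->size; }

int main()
{
    auto r = std::make_shared<Request>();
    return static_cast<int>(sizeOf(r) + sizeOfCopy(r));
}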
std::vector<ThreadID> tids;
/** CPU read function, forwards read to LSQ. */
- Fault read(RequestPtr &req, RequestPtr &sreqLow, RequestPtr &sreqHigh,
+ Fault read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx)
{
return this->iew.ldstQueue.read(req, sreqLow, sreqHigh, load_idx);
}
/** CPU write function, forwards write to LSQ. */
- Fault write(RequestPtr &req, RequestPtr &sreqLow, RequestPtr &sreqHigh,
+ Fault write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx)
{
return this->iew.ldstQueue.write(req, sreqLow, sreqHigh,
{}
void
- finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
+ finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode)
{
assert(mode == BaseTLB::Execute);
fault = _fault;
}
- void setReq(RequestPtr _req)
+ void setReq(const RequestPtr &_req)
{
req = _req;
}
* @return Any fault that occured.
*/
bool fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc);
- void finishTranslation(const Fault &fault, RequestPtr mem_req);
+ void finishTranslation(const Fault &fault, const RequestPtr &mem_req);
/** Check if an interrupt is pending and that we need to handle
if (fetchStatus[tid] != IcacheWaitResponse ||
pkt->req != memReq[tid]) {
++fetchIcacheSquashes;
- delete pkt->req;
delete pkt;
return;
}
pkt->req->setAccessLatency();
cpu->ppInstAccessComplete->notify(pkt);
// Reset the mem req to NULL.
- delete pkt->req;
delete pkt;
memReq[tid] = NULL;
}
// Setup the memReq to do a read of the first instruction's address.
// Set the appropriate read size and flags as well.
// Build request here.
- RequestPtr mem_req =
- new Request(tid, fetchBufferBlockPC, fetchBufferSize,
- Request::INST_FETCH, cpu->instMasterId(), pc,
- cpu->thread[tid]->contextId());
+ RequestPtr mem_req = std::make_shared<Request>(
+ tid, fetchBufferBlockPC, fetchBufferSize,
+ Request::INST_FETCH, cpu->instMasterId(), pc,
+ cpu->thread[tid]->contextId());
mem_req->taskId(cpu->taskId());
template <class Impl>
void
-DefaultFetch<Impl>::finishTranslation(const Fault &fault, RequestPtr mem_req)
+DefaultFetch<Impl>::finishTranslation(const Fault &fault,
+ const RequestPtr &mem_req)
{
ThreadID tid = cpu->contextToThread(mem_req->contextId());
Addr fetchBufferBlockPC = mem_req->getVaddr();
DPRINTF(Fetch, "[tid:%i] Ignoring itlb completed after squash\n",
tid);
++fetchTlbSquashes;
- delete mem_req;
return;
}
warn("Address %#x is outside of physical memory, stopping fetch\n",
mem_req->getPaddr());
fetchStatus[tid] = NoGoodAddr;
- delete mem_req;
memReq[tid] = NULL;
return;
}
DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
// Translation faulted, icache request won't be sent.
- delete mem_req;
memReq[tid] = NULL;
// Send the fault to commit. This thread will not do anything
if (retryTid == tid) {
assert(cacheBlocked);
if (retryPkt) {
- delete retryPkt->req;
delete retryPkt;
}
retryPkt = NULL;
/** Executes a read operation, using the load specified at the load
* index.
*/
- Fault read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+ Fault read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx);
/** Executes a store operation, using the store specified at the store
* index.
*/
- Fault write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+ Fault write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx);
/**
template <class Impl>
Fault
-LSQ<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+LSQ<Impl>::read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx)
{
ThreadID tid = cpu->contextToThread(req->contextId());
template <class Impl>
Fault
-LSQ<Impl>::write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+LSQ<Impl>::write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx)
{
ThreadID tid = cpu->contextToThread(req->contextId());
}
}
- delete pkt->req;
delete pkt;
return true;
}
public:
/** Executes the load at the given index. */
- Fault read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+ Fault read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx);
/** Executes the store at the given index. */
- Fault write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+ Fault write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx);
/** Returns the index of the head load instruction. */
template <class Impl>
Fault
-LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+LSQUnit<Impl>::read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx)
{
DynInstPtr load_inst = loadQueue[load_idx];
DPRINTF(LSQUnit, "Strictly ordered load [sn:%lli] PC %s\n",
load_inst->seqNum, load_inst->pcState());
- // Must delete request now that it wasn't handed off to
- // memory. This is quite ugly. @todo: Figure out the proper
- // place to really handle request deletes.
- delete req;
- if (TheISA::HasUnalignedMemAcc && sreqLow) {
- delete sreqLow;
- delete sreqHigh;
- }
return std::make_shared<GenericISA::M5PanicFault>(
"Strictly ordered load [sn:%llx] PC %s\n",
load_inst->seqNum, load_inst->pcState());
if (delay2 > delay)
delay = delay2;
- delete sreqLow;
- delete sreqHigh;
delete fst_data_pkt;
delete snd_data_pkt;
}
// @todo: Need to make this a parameter.
cpu->schedule(wb, curTick());
- // Don't need to do anything special for split loads.
- if (TheISA::HasUnalignedMemAcc && sreqLow) {
- delete sreqLow;
- delete sreqHigh;
- }
-
++lsqForwLoads;
return NoFault;
} else if (
"Store idx %i to load addr %#x\n",
store_idx, req->getVaddr());
- // Must delete request now that it wasn't handed off to
- // memory. This is quite ugly. @todo: Figure out the
- // proper place to really handle request deletes.
- delete req;
- if (TheISA::HasUnalignedMemAcc && sreqLow) {
- delete sreqLow;
- delete sreqHigh;
- }
-
return NoFault;
}
}
if (!sreqLow) {
// Packet wasn't split, just delete main packet info
delete state;
- delete req;
delete data_pkt;
}
if (!completedFirst) {
// Split packet, but first failed. Delete all state.
delete state;
- delete req;
delete data_pkt;
delete fst_data_pkt;
delete snd_data_pkt;
- delete sreqLow;
- delete sreqHigh;
- sreqLow = NULL;
- sreqHigh = NULL;
+ sreqLow.reset();
+ sreqHigh.reset();
} else {
// Can't delete main packet data or state because first packet
// was sent to the memory system
delete data_pkt;
- delete req;
- delete sreqHigh;
delete snd_data_pkt;
- sreqHigh = NULL;
+ sreqHigh.reset();
}
}
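Where the raw-pointer code paired delete with a NULL assignment, the shared_ptr version needs only reset(): the local owner drops its reference, and the Request survives for as long as an in-flight Packet still holds it. A small sketch of why this is safe, using placeholder names:

#include <memory>

struct Request {};
using RequestPtr = std::shared_ptr<Request>;

int main()
{
    RequestPtr sreqHigh = std::make_shared<Request>();
    RequestPtr in_flight = sreqHigh;  // e.g. a copy held by a packet
                                      // already sent to memory
    sreqHigh.reset();                 // was: delete sreqHigh; sreqHigh = NULL;
                                      // object still alive via in_flight
    in_flight.reset();                // last owner gone: freed here
}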
template <class Impl>
Fault
-LSQUnit<Impl>::write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+LSQUnit<Impl>::write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx)
{
assert(storeQueue[store_idx].inst);
if (pkt->senderState)
delete pkt->senderState;
- delete pkt->req;
delete pkt;
}
}
if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
- delete state->mainPkt->req;
delete state->mainPkt;
}
DynInstPtr inst = storeQueue[storeWBIdx].inst;
- RequestPtr req = storeQueue[storeWBIdx].req;
- RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
- RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
+ RequestPtr &req = storeQueue[storeWBIdx].req;
+ const RequestPtr &sreqLow = storeQueue[storeWBIdx].sreqLow;
+ const RequestPtr &sreqHigh = storeQueue[storeWBIdx].sreqHigh;
storeQueue[storeWBIdx].committed = true;
state->outstanding = 2;
// Can delete the main request now.
- delete req;
req = sreqLow;
}
assert(snd_data_pkt->req->isMmappedIpr());
TheISA::handleIprWrite(thread, snd_data_pkt);
delete snd_data_pkt;
- delete sreqLow;
- delete sreqHigh;
}
delete state;
- delete req;
completeStore(storeWBIdx);
incrStIdx(storeWBIdx);
} else if (!sendStore(data_pkt)) {
// Must delete request now that it wasn't handed off to
// memory. This is quite ugly. @todo: Figure out the proper
// place to really handle request deletes.
- delete storeQueue[store_idx].req;
+ storeQueue[store_idx].req.reset();
if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
- delete storeQueue[store_idx].sreqLow;
- delete storeQueue[store_idx].sreqHigh;
-
- storeQueue[store_idx].sreqLow = NULL;
- storeQueue[store_idx].sreqHigh = NULL;
+ storeQueue[store_idx].sreqLow.reset();
+ storeQueue[store_idx].sreqHigh.reset();
}
- storeQueue[store_idx].req = NULL;
--stores;
// Inefficient!
BaseSimpleCPU::init();
int cid = threadContexts[0]->contextId();
- ifetch_req.setContext(cid);
- data_read_req.setContext(cid);
- data_write_req.setContext(cid);
+ ifetch_req->setContext(cid);
+ data_read_req->setContext(cid);
+ data_write_req->setContext(cid);
}
AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
ppCommit(nullptr)
{
_status = Idle;
+ ifetch_req = std::make_shared<Request>();
+ data_read_req = std::make_shared<Request>();
+ data_write_req = std::make_shared<Request>();
}
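AtomicSimpleCPU's formerly by-value member Requests become shared pointers built once in the constructor and reused across accesses; taking a const reference at the use site aliases the member without a refcount bump. A sketch with simplified, illustrative members:

#include <memory>

struct Request { void setContext(int) {} };
using RequestPtr = std::shared_ptr<Request>;

struct AtomicCPU
{
    RequestPtr data_read_req;                  // was: Request data_read_req;

    AtomicCPU() : data_read_req(std::make_shared<Request>()) {}

    void readMem()
    {
        const RequestPtr &req = data_read_req; // was: RequestPtr req = &data_read_req;
        req->setContext(0);                    // reused on every access
    }
};

int main() { AtomicCPU cpu; cpu.readMem(); }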
SimpleThread* thread = t_info.thread;
// use the CPU's statically allocated read request and packet objects
- RequestPtr req = &data_read_req;
+ const RequestPtr &req = data_read_req;
if (traceData)
traceData->setMem(addr, size, flags);
}
// use the CPU's statically allocated write request and packet objects
- RequestPtr req = &data_write_req;
+ const RequestPtr &req = data_write_req;
if (traceData)
traceData->setMem(addr, size, flags);
if (numThreads > 1) {
ContextID cid = threadContexts[curThread]->contextId();
- ifetch_req.setContext(cid);
- data_read_req.setContext(cid);
- data_write_req.setContext(cid);
+ ifetch_req->setContext(cid);
+ data_read_req->setContext(cid);
+ data_write_req->setContext(cid);
}
SimpleExecContext& t_info = *threadInfo[curThread];
bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
!curMacroStaticInst;
if (needToFetch) {
- ifetch_req.taskId(taskId());
- setupFetchRequest(&ifetch_req);
- fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
+ ifetch_req->taskId(taskId());
+ setupFetchRequest(ifetch_req);
+ fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
BaseTLB::Execute);
}
//if (decoder.needMoreBytes())
//{
icache_access = true;
- Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
+ Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
ifetch_pkt.dataStatic(&inst);
if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
AtomicCPUDPort dcachePort;
bool fastmem;
- Request ifetch_req;
- Request data_read_req;
- Request data_write_req;
+ RequestPtr ifetch_req;
+ RequestPtr data_read_req;
+ RequestPtr data_write_req;
bool dcache_access;
Tick dcache_latency;
void
-BaseSimpleCPU::setupFetchRequest(RequestPtr req)
+BaseSimpleCPU::setupFetchRequest(const RequestPtr &req)
{
SimpleExecContext &t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
void checkForInterrupts();
- void setupFetchRequest(RequestPtr req);
+ void setupFetchRequest(const RequestPtr &req);
void preExecute();
void postExecute();
void advancePC(const Fault &fault);
SimpleExecContext &t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
// We're about the issues a locked load, so tell the monitor
// to start caring about this address
}
void
-TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
+TimingSimpleCPU::sendData(const RequestPtr &req, uint8_t *data, uint64_t *res,
bool read)
{
SimpleExecContext &t_info = *threadInfo[curThread];
}
void
-TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
- RequestPtr req, uint8_t *data, bool read)
+TimingSimpleCPU::sendSplitData(const RequestPtr &req1, const RequestPtr &req2,
+ const RequestPtr &req, uint8_t *data, bool read)
{
PacketPtr pkt1, pkt2;
buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
}
PacketPtr
-TimingSimpleCPU::buildPacket(RequestPtr req, bool read)
+TimingSimpleCPU::buildPacket(const RequestPtr &req, bool read)
{
return read ? Packet::createRead(req) : Packet::createWrite(req);
}
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
- RequestPtr req1, RequestPtr req2, RequestPtr req,
+ const RequestPtr &req1, const RequestPtr &req2, const RequestPtr &req,
uint8_t *data, bool read)
{
pkt1 = pkt2 = NULL;
if (traceData)
traceData->setMem(addr, size, flags);
- RequestPtr req = new Request(asid, addr, size, flags, dataMasterId(), pc,
- thread->contextId());
+ RequestPtr req = std::make_shared<Request>(
+ asid, addr, size, flags, dataMasterId(), pc,
+ thread->contextId());
req->taskId(taskId());
SimpleExecContext &t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
- RequestPtr req = dcache_pkt->req;
+ const RequestPtr &req = dcache_pkt->req;
if (req->isMmappedIpr()) {
Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
new IprEvent(dcache_pkt, this, clockEdge(delay));
if (traceData)
traceData->setMem(addr, size, flags);
- RequestPtr req = new Request(asid, addr, size, flags, dataMasterId(), pc,
- thread->contextId());
+ RequestPtr req = std::make_shared<Request>(
+ asid, addr, size, flags, dataMasterId(), pc,
+ thread->contextId());
req->taskId(taskId());
if (needToFetch) {
_status = BaseSimpleCPU::Running;
- RequestPtr ifetch_req = new Request();
+ RequestPtr ifetch_req = std::make_shared<Request>();
ifetch_req->taskId(taskId());
ifetch_req->setContext(thread->contextId());
setupFetchRequest(ifetch_req);
void
-TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
+TimingSimpleCPU::sendFetch(const Fault &fault, const RequestPtr &req,
ThreadContext *tc)
{
if (fault == NoFault) {
}
} else {
DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
- delete req;
// fetch fault: advance directly to next instruction (fault handler)
_status = BaseSimpleCPU::Running;
advanceInst(fault);
}
if (pkt) {
- delete pkt->req;
delete pkt;
}
}
SplitFragmentSenderState * send_state =
dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
assert(send_state);
- delete pkt->req;
delete pkt;
PacketPtr big_pkt = send_state->bigPkt;
delete send_state;
traceData = NULL;
}
- delete pkt->req;
delete pkt;
postExecute();
}
void
- finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
+ finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode)
{
cpu->sendFetch(fault, req, tc);
FetchTranslation fetchTranslation;
void threadSnoop(PacketPtr pkt, ThreadID sender);
- void sendData(RequestPtr req, uint8_t *data, uint64_t *res, bool read);
- void sendSplitData(RequestPtr req1, RequestPtr req2, RequestPtr req,
+ void sendData(const RequestPtr &req,
+ uint8_t *data, uint64_t *res, bool read);
+ void sendSplitData(const RequestPtr &req1, const RequestPtr &req2,
+ const RequestPtr &req,
uint8_t *data, bool read);
void translationFault(const Fault &fault);
- PacketPtr buildPacket(RequestPtr req, bool read);
+ PacketPtr buildPacket(const RequestPtr &req, bool read);
void buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
- RequestPtr req1, RequestPtr req2, RequestPtr req,
+ const RequestPtr &req1, const RequestPtr &req2,
+ const RequestPtr &req,
uint8_t *data, bool read);
bool handleReadPacket(PacketPtr pkt);
Addr addr, Request::Flags flags, uint64_t *res) override;
void fetch();
- void sendFetch(const Fault &fault, RequestPtr req, ThreadContext *tc);
+ void sendFetch(const Fault &fault,
+ const RequestPtr &req, ThreadContext *tc);
void completeIfetch(PacketPtr );
void completeDataAccess(PacketPtr pkt);
void advanceInst(const Fault &fault);
Packet::Command cmd;
// For simplicity, requests are assumed to be 1 byte-sized
- RequestPtr req = new Request(m_address, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(m_address, 1, flags, masterId);
//
// Based on the current state, issue a load or a store
// If the packet did not issue, must delete
// Note: No need to delete the data, the packet destructor
// will delete it
- delete pkt->req;
delete pkt;
DPRINTF(DirectedTest, "failed to issue request - sequencer not ready\n");
//
// Now that the tester has completed, delete the packet, then return
//
- delete pkt->req;
delete pkt;
return true;
}
Request::Flags flags;
// For simplicity, requests are assumed to be 1 byte-sized
- RequestPtr req = new Request(m_address, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(m_address, 1, flags, masterId);
Packet::Command cmd;
bool do_write = (random_mt.random(0, 100) < m_percent_writes);
// If the packet did not issue, must delete
// Note: No need to delete the data, the packet destructor
// will delete it
- delete pkt->req;
delete pkt;
DPRINTF(DirectedTest, "failed to initiate request - sequencer not ready\n");
void
GarnetSyntheticTraffic::completeRequest(PacketPtr pkt)
{
- RequestPtr req = pkt->req;
-
DPRINTF(GarnetSyntheticTraffic,
"Completed injection of %s packet for address %x\n",
pkt->isWrite() ? "write" : "read\n",
- req->getPaddr());
+ pkt->req->getPaddr());
assert(pkt->isResponse());
noResponseCycles = 0;
- delete req;
delete pkt;
}
if (injReqType == 0) {
// generate packet for virtual network 0
requestType = MemCmd::ReadReq;
- req = new Request(paddr, access_size, flags, masterId);
+ req = std::make_shared<Request>(paddr, access_size, flags, masterId);
} else if (injReqType == 1) {
// generate packet for virtual network 1
requestType = MemCmd::ReadReq;
flags.set(Request::INST_FETCH);
- req = new Request(0, 0x0, access_size, flags, masterId, 0x0, 0);
+ req = std::make_shared<Request>(
+ 0, 0x0, access_size, flags, masterId, 0x0, 0);
req->setPaddr(paddr);
} else { // if (injReqType == 2)
// generate packet for virtual network 2
requestType = MemCmd::WriteReq;
- req = new Request(paddr, access_size, flags, masterId);
+ req = std::make_shared<Request>(paddr, access_size, flags, masterId);
}
req->setContext(id);
void
MemTest::completeRequest(PacketPtr pkt, bool functional)
{
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
assert(req->getSize() == 1);
// this address is no longer outstanding
}
}
- delete pkt->req;
-
// the packet will delete the data
delete pkt;
bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
!uncacheable;
- RequestPtr req = new Request(paddr, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(paddr, 1, flags, masterId);
req->setContext(id);
outstandingAddrs.insert(paddr);
}
// Prefetches are assumed to be 0 sized
- RequestPtr req = new Request(m_address, 0, flags,
+ RequestPtr req = std::make_shared<Request>(m_address, 0, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
req->setContext(index);
} else {
// If the packet did not issue, must delete
delete pkt->senderState;
- delete pkt->req;
delete pkt;
DPRINTF(RubyTest,
Request::Flags flags;
- RequestPtr req = new Request(m_address, CHECK_SIZE, flags,
+ RequestPtr req = std::make_shared<Request>(m_address, CHECK_SIZE, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
Packet::Command cmd;
Addr writeAddr(m_address + m_store_count);
// Stores are assumed to be 1 byte-sized
- RequestPtr req = new Request(writeAddr, 1, flags, m_tester_ptr->masterId(),
- curTick(), m_pc);
+ RequestPtr req = std::make_shared<Request>(
+ writeAddr, 1, flags, m_tester_ptr->masterId(), curTick(), m_pc);
req->setContext(index);
Packet::Command cmd;
// Note: No need to delete the data, the packet destructor
// will delete it
delete pkt->senderState;
- delete pkt->req;
delete pkt;
DPRINTF(RubyTest, "failed to initiate action - sequencer not ready\n");
}
// Checks are sized depending on the number of bytes written
- RequestPtr req = new Request(m_address, CHECK_SIZE, flags,
+ RequestPtr req = std::make_shared<Request>(m_address, CHECK_SIZE, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
req->setContext(index);
// Note: No need to delete the data, the packet destructor
// will delete it
delete pkt->senderState;
- delete pkt->req;
delete pkt;
DPRINTF(RubyTest, "failed to initiate check - cpu port not ready\n");
// Now that the tester has completed, delete the senderState
// (includes sublock) and the packet, then return
delete pkt->senderState;
- delete pkt->req;
delete pkt;
return true;
}
Request::FlagsType flags)
{
// Create new request
- RequestPtr req = new Request(addr, size, flags, masterID);
+ RequestPtr req = std::make_shared<Request>(addr, size, flags, masterID);
// Dummy PC to have PC-based prefetchers latch on; get entropy into higher
// bits
req->setPC(((Addr)masterID) << 2);
warn("%s suppressed %d packets with non-memory addresses\n",
name(), numSuppressed);
- delete pkt->req;
delete pkt;
pkt = nullptr;
}
bool
TrafficGen::TrafficGenPort::recvTimingResp(PacketPtr pkt)
{
- delete pkt->req;
delete pkt;
return true;
}
// Create a request and the packet containing request
- Request* req = new Request(node_ptr->physAddr, node_ptr->size,
- node_ptr->flags, masterID, node_ptr->seqNum,
- ContextID(0));
+ auto req = std::make_shared<Request>(
+ node_ptr->physAddr, node_ptr->size,
+ node_ptr->flags, masterID, node_ptr->seqNum,
+ ContextID(0));
+
req->setPC(node_ptr->pc);
// If virtual address is valid, set the asid and virtual address fields
// of the request.
{
// Create new request
- Request* req = new Request(addr, size, flags, masterID);
+ auto req = std::make_shared<Request>(addr, size, flags, masterID);
req->setPC(pc);
// If this is not done, it triggers an assert in the L1 cache for an
// invalid contextId
TraceCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
// All responses on the instruction fetch side are ignored. Simply delete
- // the request and packet to free allocated memory
- delete pkt->req;
+ // the packet to free allocated memory
delete pkt;
return true;
// Handle the responses for data memory requests which is done inside the
// elastic data generator
owner->dcacheRecvTimingResp(pkt);
- // After processing the response delete the request and packet to free
+ // After processing the response delete the packet to free
// memory
- delete pkt->req;
delete pkt;
return true;
* Single translation state. We set the number of outstanding
* translations to one and indicate that it is not split.
*/
- WholeTranslationState(RequestPtr _req, uint8_t *_data, uint64_t *_res,
- BaseTLB::Mode _mode)
+ WholeTranslationState(const RequestPtr &_req, uint8_t *_data,
+ uint64_t *_res, BaseTLB::Mode _mode)
: outstanding(1), delay(false), isSplit(false), mainReq(_req),
sreqLow(NULL), sreqHigh(NULL), data(_data), res(_res), mode(_mode)
{
* number of outstanding translations to two and then mark this as a
* split translation.
*/
- WholeTranslationState(RequestPtr _req, RequestPtr _sreqLow,
- RequestPtr _sreqHigh, uint8_t *_data, uint64_t *_res,
- BaseTLB::Mode _mode)
+ WholeTranslationState(const RequestPtr &_req, const RequestPtr &_sreqLow,
+ const RequestPtr &_sreqHigh, uint8_t *_data,
+ uint64_t *_res, BaseTLB::Mode _mode)
: outstanding(2), delay(false), isSplit(true), mainReq(_req),
sreqLow(_sreqLow), sreqHigh(_sreqHigh), data(_data), res(_res),
mode(_mode)
void
deleteReqs()
{
- delete mainReq;
+ mainReq.reset();
if (isSplit) {
- delete sreqLow;
- delete sreqHigh;
+ sreqLow.reset();
+ sreqHigh.reset();
}
}
};
* translation is complete if the state says so.
*/
void
- finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
+ finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode)
{
assert(state);
delete state;
}
- // delete the request that we created and also the packet
- delete pkt->req;
+ // delete the packet
delete pkt;
// we might be drained at this point, if so signal the drain event
event ? event->scheduled() : -1);
for (ChunkGenerator gen(addr, size, sys->cacheLineSize());
!gen.done(); gen.next()) {
- req = new Request(gen.addr(), gen.size(), flag, masterId);
+
+ req = std::make_shared<Request>(
+ gen.addr(), gen.size(), flag, masterId);
+
req->taskId(ContextSwitchTaskId::DMA);
PacketPtr pkt = new Packet(req, cmd);
X86ISA::I82094AA::recvResponse(PacketPtr pkt)
{
// Packet instantiated calling sendMessage() in signalInterrupt()
- delete pkt->req;
delete pkt;
return 0;
}
}
delete pkt->senderState;
- delete pkt->req;
delete pkt;
return true;
} else if (pkt->req->isKernel() && pkt->req->isAcquire()) {
}
delete pkt->senderState;
- delete pkt->req;
delete pkt;
return true;
}
delete sender_state->tlbEntry;
delete new_pkt;
delete pkt->senderState;
- delete pkt->req;
delete pkt;
}
}
void
ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, bool kernelLaunch,
- Request* req)
+ RequestPtr req)
{
assert(gpuDynInst->isGlobalSeg());
if (!req) {
- req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
+ req = std::make_shared<Request>(
+ 0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
}
req->setPaddr(0);
if (kernelLaunch) {
}
delete pkt->senderState;
- delete pkt->req;
delete pkt;
}
if (!stride)
break;
- RequestPtr prefetch_req = new Request(0, vaddr + stride * pf *
- TheISA::PageBytes,
- sizeof(uint8_t), 0,
- computeUnit->masterId(),
- 0, 0, 0);
+ RequestPtr prefetch_req = std::make_shared<Request>(
+ 0, vaddr + stride * pf * TheISA::PageBytes,
+ sizeof(uint8_t), 0,
+ computeUnit->masterId(),
+ 0, 0, nullptr);
PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
uint8_t foo = 0;
delete tlb_state->tlbEntry;
delete tlb_state;
- delete prefetch_pkt->req;
delete prefetch_pkt;
}
}
{
// this is just a request to carry the GPUDynInstPtr
// back and forth
- RequestPtr newRequest = new Request();
+ RequestPtr newRequest = std::make_shared<Request>();
newRequest->setPaddr(0x0);
// ReadReq is not evaluated by the LDS but the Packet ctor requires this
GPUDynInstPtr gpuDynInst = senderState->getMemInst();
delete packet->senderState;
- delete packet->req;
delete packet;
computeUnit->localMemoryPipe.getLMRespFIFO().push(gpuDynInst);
}
// set up virtual request
- RequestPtr req = new Request(0, vaddr, size, Request::INST_FETCH,
- computeUnit->masterId(), 0, 0, 0);
+ RequestPtr req = std::make_shared<Request>(
+ 0, vaddr, size, Request::INST_FETCH,
+ computeUnit->masterId(), 0, 0, nullptr);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
// This fetchBlock is kind of faux right now - because the translations so
wavefront->pendingFetch = false;
delete pkt->senderState;
- delete pkt->req;
delete pkt;
}
}
Fault
- GpuTLB::translateInt(RequestPtr req, ThreadContext *tc)
+ GpuTLB::translateInt(const RequestPtr &req, ThreadContext *tc)
{
DPRINTF(GPUTLB, "Addresses references internal memory.\n");
Addr vaddr = req->getVaddr();
* On a hit it will update the LRU stack.
*/
bool
- GpuTLB::tlbLookup(RequestPtr req, ThreadContext *tc, bool update_stats)
+ GpuTLB::tlbLookup(const RequestPtr &req,
+ ThreadContext *tc, bool update_stats)
{
bool tlb_hit = false;
#ifndef NDEBUG
}
Fault
- GpuTLB::translate(RequestPtr req, ThreadContext *tc,
+ GpuTLB::translate(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode,
bool &delayedResponse, bool timing, int &latency)
{
};
Fault
- GpuTLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
- int &latency)
+ GpuTLB::translateAtomic(const RequestPtr &req, ThreadContext *tc,
+ Mode mode, int &latency)
{
bool delayedResponse;
}
void
- GpuTLB::translateTiming(RequestPtr req, ThreadContext *tc,
+ GpuTLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode, int &latency)
{
bool delayedResponse;
}
tlbOutcome lookup_outcome = TLB_MISS;
- RequestPtr tmp_req = pkt->req;
+ const RequestPtr &tmp_req = pkt->req;
// Access the TLB and figure out if it's a hit or a miss.
bool success = tlbLookup(tmp_req, tmp_tc, update_stats);
* may be responsible for cleaning itself up, which will happen in
* this function. Once it's called the object is no longer valid.
*/
- virtual void finish(Fault fault, RequestPtr req, ThreadContext *tc,
- Mode mode) = 0;
+ virtual void finish(Fault fault, const RequestPtr &req,
+ ThreadContext *tc, Mode mode) = 0;
};
void dumpAll();
*/
std::vector<EntryList> entryList;
- Fault translateInt(RequestPtr req, ThreadContext *tc);
+ Fault translateInt(const RequestPtr &req, ThreadContext *tc);
- Fault translate(RequestPtr req, ThreadContext *tc,
+ Fault translate(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode, bool &delayedResponse,
bool timing, int &latency);
void printAccessPattern();
- Fault translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
- int &latency);
+ Fault translateAtomic(const RequestPtr &req, ThreadContext *tc,
+ Mode mode, int &latency);
- void translateTiming(RequestPtr req, ThreadContext *tc,
+ void translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode,
int &latency);
virtual void unserialize(CheckpointIn& cp);
void issueTranslation();
enum tlbOutcome {TLB_HIT, TLB_MISS, PAGE_WALK, MISS_RETURN};
- bool tlbLookup(RequestPtr req, ThreadContext *tc, bool update_stats);
+ bool tlbLookup(const RequestPtr &req,
+ ThreadContext *tc, bool update_stats);
void handleTranslationReturn(Addr addr, tlbOutcome outcome,
PacketPtr pkt);
}
void
-Shader::doFunctionalAccess(RequestPtr req, MemCmd cmd, void *data,
+Shader::doFunctionalAccess(const RequestPtr &req, MemCmd cmd, void *data,
bool suppress_func_errors, int cu_id)
{
int block_size = cuList.at(cu_id)->cacheLineSize();
for (ChunkGenerator gen(address, size, cuList.at(cu_id)->cacheLineSize());
!gen.done(); gen.next()) {
- RequestPtr req = new Request(0, gen.addr(), gen.size(), 0,
- cuList[0]->masterId(), 0, 0, 0);
+
+ RequestPtr req = std::make_shared<Request>(
+ 0, gen.addr(), gen.size(), 0,
+ cuList[0]->masterId(), 0, 0, nullptr);
doFunctionalAccess(req, cmd, data_buf, suppress_func_errors, cu_id);
data_buf += gen.size();
- delete req;
}
}
void WriteMem(uint64_t address, void *ptr, uint32_t sz, int cu_id,
bool suppress_func_errors);
- void doFunctionalAccess(RequestPtr req, MemCmd cmd, void *data,
+ void doFunctionalAccess(const RequestPtr &req, MemCmd cmd, void *data,
bool suppress_func_errors, int cu_id);
void
// Write back the data.
// Create a new request-packet pair
- RequestPtr req = new Request(block->first, blockSize, 0, 0);
+ RequestPtr req = std::make_shared<Request>(
+ block->first, blockSize, 0, 0);
+
PacketPtr new_pkt = new Packet(req, MemCmd::WritebackDirty, blockSize);
new_pkt->dataDynamic(block->second); // This will be deleted later
void
AbstractMemory::trackLoadLocked(PacketPtr pkt)
{
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
// first we check if we already have a locked addr for this
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
bool isLLSC = pkt->isLLSC();
static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
// check for matching execution context
- bool matchesContext(RequestPtr req) const
+ bool matchesContext(const RequestPtr &req) const
{
return (contextId == req->contextId());
}
- LockedAddr(RequestPtr req) : addr(mask(req->getPaddr())),
- contextId(req->contextId())
+ LockedAddr(const RequestPtr &req) : addr(mask(req->getPaddr())),
+ contextId(req->contextId())
{}
// constructor for unserialization use
// this method must be called on *all* stores since even
// non-conditional stores must clear any matching lock addresses.
bool writeOK(PacketPtr pkt) {
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
if (lockedAddrList.empty()) {
// no locked addrs: nothing to check, store_conditional fails
bool isLLSC = pkt->isLLSC();
return allocateMissBuffer(pkt, curTick(), false);
} else {
// free the request and packet
- delete pkt->req;
delete pkt;
}
}
writebacks[Request::wbMasterId]++;
- RequestPtr req = new Request(regenerateBlkAddr(blk), blkSize, 0,
- Request::wbMasterId);
+ RequestPtr req = std::make_shared<Request>(
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+
if (blk->isSecure())
req->setFlags(Request::SECURE);
PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
- RequestPtr req = new Request(regenerateBlkAddr(blk), blkSize, 0,
- Request::wbMasterId);
+ RequestPtr req = std::make_shared<Request>(
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+
if (blk->isSecure()) {
req->setFlags(Request::SECURE);
}
if (blk.isDirty()) {
assert(blk.isValid());
- Request request(regenerateBlkAddr(&blk),
- blkSize, 0, Request::funcMasterId);
- request.taskId(blk.task_id);
+ RequestPtr request = std::make_shared<Request>(
+ regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
+
+ request->taskId(blk.task_id);
if (blk.isSecure()) {
- request.setFlags(Request::SECURE);
+ request->setFlags(Request::SECURE);
}
- Packet packet(&request, MemCmd::WriteReq);
+ Packet packet(request, MemCmd::WriteReq);
packet.dataStatic(blk.data);
memSidePort.sendFunctional(&packet);
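Blocks that used to build a Request on the stack and hand its address to a Packet, as in the writeback above, must now heap-allocate through make_shared, because Packet stores a RequestPtr. The object is still reclaimed at the end of the scope, just via the reference count. A minimal sketch under those assumptions, not the cache's actual code:

#include <memory>

struct Request {};
using RequestPtr = std::shared_ptr<Request>;

struct Packet
{
    RequestPtr req;
    explicit Packet(const RequestPtr &r) : req(r) {}
};

void writebackVisitor()
{
    auto request = std::make_shared<Request>(); // was: Request request(...);
    Packet packet(request);                     // was: Packet packet(&request, ...);
    // both go out of scope here; the Request is freed automatically
}

int main() { writebackVisitor(); }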
// check for matching execution context, and an address that
// is within the lock
- bool matches(const RequestPtr req) const
+ bool matches(const RequestPtr &req) const
{
Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() -1;
}
// check if a request is intersecting and thus invalidating the lock
- bool intersects(const RequestPtr req) const
+ bool intersects(const RequestPtr &req) const
{
Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() - 1;
return (req_low <= highAddr) && (req_high >= lowAddr);
}
- Lock(const RequestPtr req)
+ Lock(const RequestPtr &req)
: contextId(req->contextId()),
lowAddr(req->getPaddr()),
highAddr(lowAddr + req->getSize() - 1)
* Clear the any load lock that intersect the request, and is from
* a different context.
*/
- void clearLoadLocks(RequestPtr req)
+ void clearLoadLocks(const RequestPtr &req)
{
auto l = lockList.begin();
while (l != lockList.end()) {
if (!pkt->isLLSC() && lockList.empty())
return true;
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
if (pkt->isLLSC()) {
// it's a store conditional... have to check for matching
if (!mshr) {
// copy the request and create a new SoftPFReq packet
- RequestPtr req = new Request(pkt->req->getPaddr(),
- pkt->req->getSize(),
- pkt->req->getFlags(),
- pkt->req->masterId());
+ RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
+ pkt->req->getSize(),
+ pkt->req->getFlags(),
+ pkt->req->masterId());
pf = new Packet(req, pkt->cmd);
pf->allocate();
assert(pf->getAddr() == pkt->getAddr());
// immediately with dummy data so the core would be able to
// retire it. This request completes right here, so we
// deallocate it.
- delete tgt_pkt->req;
delete tgt_pkt;
break; // skip response
}
assert(tgt_pkt->cmd == MemCmd::HardPFReq);
if (blk)
blk->status |= BlkHWPrefetched;
- delete tgt_pkt->req;
delete tgt_pkt;
break;
{
assert(!writebackClean);
assert(blk && blk->isValid() && !blk->isDirty());
+
// Creating a zero sized write, a message to the snoop filter
- RequestPtr req =
- new Request(regenerateBlkAddr(blk), blkSize, 0,
- Request::wbMasterId);
+ RequestPtr req = std::make_shared<Request>(
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
if (blk->isSecure())
req->setFlags(Request::SECURE);
if (!respond && is_deferred) {
assert(pkt->needsResponse());
-
- // if we copied the deferred packet with the intention to
- // respond, but are not responding, then a cache above us must
- // be, and we can use this as the indication of whether this
- // is a packet where we created a copy of the request or not
- if (!pkt->cacheResponding()) {
- delete pkt->req;
- }
-
delete pkt;
}
}
// given that no response is expected, delete Request and Packet
- delete tgt_pkt->req;
delete tgt_pkt;
return false;
// the packet and the request as part of handling the deferred
// snoop.
PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
- new Packet(new Request(*pkt->req), pkt->cmd, blkSize, pkt->id);
+ new Packet(std::make_shared<Request>(*pkt->req), pkt->cmd,
+ blkSize, pkt->id);
if (will_respond) {
// we are the ordering point, and will consequently
// We have filled the block and the prefetcher does not
// require responses.
- delete tgt_pkt->req;
delete tgt_pkt;
break;
{
// Delete the queued prefetch packets
for (DeferredPacket &p : pfq) {
- delete p.pkt->req;
delete p.pkt;
}
}
while (itr != pfq.end()) {
if (itr->pkt->getAddr() == blk_addr &&
itr->pkt->isSecure() == is_secure) {
- delete itr->pkt->req;
delete itr->pkt;
itr = pfq.erase(itr);
} else {
/* Create a prefetch memory request */
RequestPtr pf_req =
- new Request(pf_info.first, blkSize, 0, masterId);
+ std::make_shared<Request>(pf_info.first, blkSize, 0, masterId);
if (is_secure) {
pf_req->setFlags(Request::SECURE);
}
DPRINTF(HWPrefetch, "Prefetch queue full, removing lowest priority "
"oldest packet, addr: %#x", it->pkt->getAddr());
- delete it->pkt->req;
delete it->pkt;
pfq.erase(it);
}
const PacketId id;
/// A pointer to the original request.
- const RequestPtr req;
+ RequestPtr req;
private:
/**
* first, but the Request's physical address and size fields need
* not be valid. The command must be supplied.
*/
- Packet(const RequestPtr _req, MemCmd _cmd)
- : cmd(_cmd), id((PacketId)_req), req(_req), data(nullptr), addr(0),
- _isSecure(false), size(0), headerDelay(0), snoopDelay(0),
+ Packet(const RequestPtr &_req, MemCmd _cmd)
+ : cmd(_cmd), id((PacketId)_req.get()), req(_req), data(nullptr),
+ addr(0), _isSecure(false), size(0), headerDelay(0), snoopDelay(0),
payloadDelay(0), senderState(NULL)
{
if (req->hasPaddr()) {
* a request that is for a whole block, not the address from the
* req. this allows for overriding the size/addr of the req.
*/
- Packet(const RequestPtr _req, MemCmd _cmd, int _blkSize, PacketId _id = 0)
- : cmd(_cmd), id(_id ? _id : (PacketId)_req), req(_req), data(nullptr),
- addr(0), _isSecure(false), headerDelay(0), snoopDelay(0),
- payloadDelay(0), senderState(NULL)
+ Packet(const RequestPtr &_req, MemCmd _cmd, int _blkSize, PacketId _id = 0)
+ : cmd(_cmd), id(_id ? _id : (PacketId)_req.get()), req(_req),
+ data(nullptr), addr(0), _isSecure(false), headerDelay(0),
+ snoopDelay(0), payloadDelay(0), senderState(NULL)
{
if (req->hasPaddr()) {
addr = req->getPaddr() & ~(_blkSize - 1);
* Generate the appropriate read MemCmd based on the Request flags.
*/
static MemCmd
- makeReadCmd(const RequestPtr req)
+ makeReadCmd(const RequestPtr &req)
{
if (req->isLLSC())
return MemCmd::LoadLockedReq;
* Generate the appropriate write MemCmd based on the Request flags.
*/
static MemCmd
- makeWriteCmd(const RequestPtr req)
+ makeWriteCmd(const RequestPtr &req)
{
if (req->isLLSC())
return MemCmd::StoreCondReq;
* Fine-tune the MemCmd type if it's not a vanilla read or write.
*/
static PacketPtr
- createRead(const RequestPtr req)
+ createRead(const RequestPtr &req)
{
return new Packet(req, makeReadCmd(req));
}
static PacketPtr
- createWrite(const RequestPtr req)
+ createWrite(const RequestPtr &req)
{
return new Packet(req, makeWriteCmd(req));
}
*/
~Packet()
{
- // Delete the request object if this is a request packet which
- // does not need a response, because the requester will not get
- // a chance. If the request packet needs a response then the
- // request will be deleted on receipt of the response
- // packet. We also make sure to never delete the request for
- // express snoops, even for cases when responses are not
- // needed (CleanEvict and Writeback), since the snoop packet
- // re-uses the same request.
- if (req && isRequest() && !needsResponse() &&
- !isExpressSnoop()) {
- delete req;
- }
deleteData();
}
}
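The removed destructor logic above existed to decide which of several aliasing Packets was responsible for deleting a shared Request, express snoops being the awkward case. With shared ownership that policy disappears: each Packet simply drops its reference. A sketch of the express-snoop sharing that used to need special-casing, with illustrative types:

#include <memory>

struct Request {};
using RequestPtr = std::shared_ptr<Request>;

struct Packet
{
    RequestPtr req;
    explicit Packet(const RequestPtr &r) : req(r) {}
    // note: no request bookkeeping needed in ~Packet() any more
};

int main()
{
    auto req = std::make_shared<Request>();
    Packet *orig  = new Packet(req);
    Packet *snoop = new Packet(req);  // express snoop re-uses the request
    delete orig;                      // request survives: snoop still owns it
    delete snoop;                     // and the local 'req' still does
    req.reset();                      // last reference dropped: freed here
}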
Fault
-EmulationPageTable::translate(RequestPtr req)
+EmulationPageTable::translate(const RequestPtr &req)
{
Addr paddr;
assert(pageAlign(req->getVaddr() + req->getSize() - 1) ==
* field of req.
* @param req The memory request.
*/
- Fault translate(RequestPtr req);
+ Fault translate(const RequestPtr &req);
void getMappings(std::vector<std::pair<Addr, Addr>> *addr_mappings);
void
MasterPort::printAddr(Addr a)
{
- Request req(a, 1, 0, Request::funcMasterId);
- Packet pkt(&req, MemCmd::PrintReq);
+ auto req = std::make_shared<Request>(
+ a, 1, 0, Request::funcMasterId);
+
+ Packet pkt(req, MemCmd::PrintReq);
Packet::PrintReqState prs(std::cerr);
pkt.senderState = &prs;
{
for (ChunkGenerator gen(addr, size, _cacheLineSize); !gen.done();
gen.next()) {
- Request req(gen.addr(), gen.size(), flags, Request::funcMasterId);
- Packet pkt(&req, MemCmd::ReadReq);
+
+ auto req = std::make_shared<Request>(
+ gen.addr(), gen.size(), flags, Request::funcMasterId);
+
+ Packet pkt(req, MemCmd::ReadReq);
pkt.dataStatic(p);
_port.sendFunctional(&pkt);
p += gen.size();
{
for (ChunkGenerator gen(addr, size, _cacheLineSize); !gen.done();
gen.next()) {
- Request req(gen.addr(), gen.size(), flags, Request::funcMasterId);
- Packet pkt(&req, MemCmd::WriteReq);
+
+ auto req = std::make_shared<Request>(
+ gen.addr(), gen.size(), flags, Request::funcMasterId);
+
+ Packet pkt(req, MemCmd::WriteReq);
pkt.dataStaticConst(p);
_port.sendFunctional(&pkt);
p += gen.size();
class Request;
-typedef Request* RequestPtr;
+typedef std::shared_ptr<Request> RequestPtr;
typedef uint16_t MasterID;
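This typedef swap is the pivot of the whole change. Because shared_ptr mirrors the raw pointer's operator-> and boolean tests, dereferencing call sites compile unchanged; only creation (new to make_shared), destruction (delete to reset or scope exit), and signatures (const RequestPtr &) needed edits. A compatibility sketch under those assumptions:

#include <memory>

struct Request { bool hasPaddr() const { return true; } };
using RequestPtr = std::shared_ptr<Request>;   // was: typedef Request*

void use(const RequestPtr &req)
{
    if (req && req->hasPaddr()) {
        // identical syntax with Request* and shared_ptr<Request>
    }
}

int main()
{
    use(std::make_shared<Request>());
}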
class Request
assert(privateFlags.isSet(VALID_VADDR));
assert(privateFlags.noneSet(VALID_PADDR));
assert(split_addr > _vaddr && split_addr < _vaddr + _size);
- req1 = new Request(*this);
- req2 = new Request(*this);
+ req1 = std::make_shared<Request>(*this);
+ req2 = std::make_shared<Request>(*this);
req1->_size = split_addr - _vaddr;
req2->_vaddr = split_addr;
req2->_size = _size - req1->_size;
AbstractController::queueMemoryRead(const MachineID &id, Addr addr,
Cycles latency)
{
- RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
- m_masterId);
+ RequestPtr req = std::make_shared<Request>(
+ addr, RubySystem::getBlockSizeBytes(), 0, m_masterId);
PacketPtr pkt = Packet::createRead(req);
uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
AbstractController::queueMemoryWrite(const MachineID &id, Addr addr,
Cycles latency, const DataBlock &block)
{
- RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
- m_masterId);
+ RequestPtr req = std::make_shared<Request>(
+ addr, RubySystem::getBlockSizeBytes(), 0, m_masterId);
PacketPtr pkt = Packet::createWrite(req);
uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
Cycles latency,
const DataBlock &block, int size)
{
- RequestPtr req = new Request(addr, size, 0, m_masterId);
+ RequestPtr req = std::make_shared<Request>(addr, size, 0, m_masterId);
PacketPtr pkt = Packet::createWrite(req);
uint8_t *newData = new uint8_t[size];
}
getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
- delete pkt->req;
delete pkt;
}
if (m_records_flushed < m_records.size()) {
TraceRecord* rec = m_records[m_records_flushed];
m_records_flushed++;
- Request* req = new Request(rec->m_data_address,
- m_block_size_bytes, 0,
- Request::funcMasterId);
+ auto req = std::make_shared<Request>(rec->m_data_address,
+ m_block_size_bytes, 0,
+ Request::funcMasterId);
MemCmd::Command requestType = MemCmd::FlushReq;
Packet *pkt = new Packet(req, requestType);
for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes;
rec_bytes_read += RubySystem::getBlockSizeBytes()) {
- Request* req = nullptr;
+ RequestPtr req;
MemCmd::Command requestType;
if (traceRecord->m_type == RubyRequestType_LD) {
requestType = MemCmd::ReadReq;
- req = new Request(traceRecord->m_data_address + rec_bytes_read,
+ req = std::make_shared<Request>(
+ traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
} else if (traceRecord->m_type == RubyRequestType_IFETCH) {
requestType = MemCmd::ReadReq;
- req = new Request(traceRecord->m_data_address + rec_bytes_read,
+ req = std::make_shared<Request>(
+ traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(),
Request::INST_FETCH, Request::funcMasterId);
} else {
requestType = MemCmd::WriteReq;
- req = new Request(traceRecord->m_data_address + rec_bytes_read,
+ req = std::make_shared<Request>(
+ traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
}
}
HSAScope
-reqScopeToHSAScope(Request* req)
+reqScopeToHSAScope(const RequestPtr &req)
{
HSAScope accessScope = HSAScope_UNSPECIFIED;
if (req->isScoped()) {
}
HSASegment
-reqSegmentToHSASegment(Request* req)
+reqSegmentToHSASegment(const RequestPtr &req)
{
HSASegment accessSegment = HSASegment_GLOBAL;
class RubyGPUCoalescerParams;
-HSAScope reqScopeToHSAScope(Request* req);
-HSASegment reqSegmentToHSASegment(Request* req);
+HSAScope reqScopeToHSAScope(const RequestPtr &req);
+HSASegment reqSegmentToHSASegment(const RequestPtr &req);
struct GPUCoalescerRequest
{
// Allocate the invalidate packet on the stack; the request is
// heap-allocated and shared, and receivers are assumed not to
// modify or delete either.
// TODO: should this really be using funcMasterId?
- Request request(address, RubySystem::getBlockSizeBytes(), 0,
- Request::funcMasterId);
+ auto request = std::make_shared<Request>(
+ address, RubySystem::getBlockSizeBytes(), 0,
+ Request::funcMasterId);
+
// Use a single packet to signal all snooping ports of the invalidation.
// This assumes that snooping ports do NOT modify the packet/request
- Packet pkt(&request, MemCmd::InvalidateReq);
+ Packet pkt(request, MemCmd::InvalidateReq);
for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
// check if the connected master port is snooping
if ((*p)->isSnooping()) {
RubySystem *rs = m_ruby_system;
if (RubySystem::getWarmupEnabled()) {
assert(pkt->req);
- delete pkt->req;
delete pkt;
rs->m_cache_recorder->enqueueNextFetchRequest();
} else if (RubySystem::getCooldownEnabled()) {
SCMasterPort::generatePacket(tlm::tlm_generic_payload& trans)
{
Request::Flags flags;
- auto req = new Request(trans.get_address(), trans.get_data_length(), flags,
- owner.masterId);
+ auto req = std::make_shared<Request>(
+ trans.get_address(), trans.get_data_length(), flags,
+ owner.masterId);
MemCmd cmd;