template <class XC>
inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
{
xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr() & ~0xf);
xc->setMiscReg(MISCREG_LOCKFLAG, true);
template <class XC>
inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
{
if (req->isUncacheable()) {
// Funky Turbolaser mailbox access...don't update
template <class XC>
inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
{
xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr());
xc->setMiscReg(MISCREG_LOCKFLAG, true);
template <class XC>
inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
{
if (req->isSwap())
return true;
template <class XC>
inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
{
}
template <class XC>
inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
{
return true;
}
*d = gpuDynInst->wavefront()->ldsChunk->
read<c0>(vaddr);
} else {
- Request *req = new Request(0, vaddr, sizeof(c0), 0,
- gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId);
+ RequestPtr req = new Request(0,
+ vaddr, sizeof(c0), 0,
+ gpuDynInst->computeUnit()->masterId(),
+ 0, gpuDynInst->wfDynId);
gpuDynInst->setRequestFlags(req);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
gpuDynInst->statusBitVector = VectorMask(1);
gpuDynInst->useContinuation = false;
// create request
- Request *req = new Request(0, 0, 0, 0,
+ RequestPtr req = new Request(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::ACQUIRE);
gpuDynInst->execContinuation = &GPUStaticInst::execSt;
gpuDynInst->useContinuation = true;
// create request
- Request *req = new Request(0, 0, 0, 0,
+ RequestPtr req = new Request(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::RELEASE);
gpuDynInst->wavefront()->ldsChunk->write<c0>(vaddr,
*d);
} else {
- Request *req =
+ RequestPtr req =
new Request(0, vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
gpuDynInst->useContinuation = true;
// create request
- Request *req = new Request(0, 0, 0, 0,
+ RequestPtr req = new Request(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::RELEASE);
"type.\n");
}
} else {
- Request *req =
+ RequestPtr req =
new Request(0, vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId,
// the acquire completes
gpuDynInst->useContinuation = false;
// create request
- Request *req = new Request(0, 0, 0, 0,
+ RequestPtr req = new Request(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::ACQUIRE);
template <class XC>
inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
{
xc->setMiscReg(MISCREG_LLADDR, req->getPaddr() & ~0xf);
xc->setMiscReg(MISCREG_LLFLAG, true);
template <class XC>
inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
{
if (req->isUncacheable()) {
// Funky Turbolaser mailbox access...don't update
template <class XC> inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
{
locked_addrs.push(req->getPaddr() & ~0xF);
DPRINTF(LLSC, "[cid:%d]: Reserved address %x.\n",
{}
template <class XC> inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
{
// Normally RISC-V uses zero to indicate success and nonzero to indicate
// failure (right now only 1 is reserved), but in gem5 zero indicates
Request::Flags flags)
{
instFlags[ReqMade] = true;
- Request *req = NULL;
- Request *sreqLow = NULL;
- Request *sreqHigh = NULL;
+ RequestPtr req = NULL;
+ RequestPtr sreqLow = NULL;
+ RequestPtr sreqHigh = NULL;
if (instFlags[ReqMade] && translationStarted()) {
req = savedReq;
traceData->setMem(addr, size, flags);
instFlags[ReqMade] = true;
- Request *req = NULL;
- Request *sreqLow = NULL;
- Request *sreqHigh = NULL;
+ RequestPtr req = NULL;
+ RequestPtr sreqLow = NULL;
+ RequestPtr sreqHigh = NULL;
if (instFlags[ReqMade] && translationStarted()) {
req = savedReq;
* Checks if the flags set by the Checker and Checkee match.
*/
bool
-CheckerCPU::checkFlags(Request *unverified_req, Addr vAddr,
+CheckerCPU::checkFlags(RequestPtr unverified_req, Addr vAddr,
Addr pAddr, int flags)
{
Addr unverifiedVAddr = unverified_req->getVaddr();
dumpAndExit();
}
- bool checkFlags(Request *unverified_req, Addr vAddr,
+ bool checkFlags(RequestPtr unverified_req, Addr vAddr,
Addr pAddr, int flags);
void dumpAndExit();
SimpleThread *threadBase() { return thread; }
InstResult unverifiedResult;
- Request *unverifiedReq;
+ RequestPtr unverifiedReq;
uint8_t *unverifiedMemData;
bool changedPC;
}
}
- Request *fragment = new Request();
+ RequestPtr fragment = new Request();
fragment->setContext(request.contextId());
fragment->setVirt(0 /* asid */,
for (unsigned int fragment_index = 0; fragment_index < numFragments;
fragment_index++)
{
- Request *fragment = fragmentRequests[fragment_index];
+ RequestPtr fragment = fragmentRequests[fragment_index];
DPRINTFS(MinorMem, (&port), "Making packet %d for request: %s"
" (%d, 0x%x)\n",
/** Fragment Requests corresponding to the address ranges of
* each fragment */
- std::vector<Request *> fragmentRequests;
+ std::vector<RequestPtr> fragmentRequests;
/** Packets matching fragmentRequests to issue fragments to memory */
std::vector<Packet *> fragmentPackets;
public:
/** Executes the load at the given index. */
- Fault read(Request *req, Request *sreqLow, Request *sreqHigh,
+ Fault read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
int load_idx);
/** Executes the store at the given index. */
- Fault write(Request *req, Request *sreqLow, Request *sreqHigh,
+ Fault write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
uint8_t *data, int store_idx);
/** Returns the index of the head load instruction. */
template <class Impl>
Fault
-LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
+LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
int load_idx)
{
DynInstPtr load_inst = loadQueue[load_idx];
template <class Impl>
Fault
-LSQUnit<Impl>::write(Request *req, Request *sreqLow, Request *sreqHigh,
+LSQUnit<Impl>::write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
uint8_t *data, int store_idx)
{
assert(storeQueue[store_idx].inst);
DynInstPtr inst = storeQueue[storeWBIdx].inst;
- Request *req = storeQueue[storeWBIdx].req;
+ RequestPtr req = storeQueue[storeWBIdx].req;
RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
SimpleThread* thread = t_info.thread;
// use the CPU's statically allocated read request and packet objects
- Request *req = &data_read_req;
+ RequestPtr req = &data_read_req;
if (traceData)
traceData->setMem(addr, size, flags);
}
// use the CPU's statically allocated write request and packet objects
- Request *req = &data_write_req;
+ RequestPtr req = &data_write_req;
if (traceData)
traceData->setMem(addr, size, flags);
void
-BaseSimpleCPU::setupFetchRequest(Request *req)
+BaseSimpleCPU::setupFetchRequest(RequestPtr req)
{
SimpleExecContext &t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
void checkForInterrupts();
- void setupFetchRequest(Request *req);
+ void setupFetchRequest(RequestPtr req);
void preExecute();
void postExecute();
void advancePC(const Fault &fault);
if (needToFetch) {
_status = BaseSimpleCPU::Running;
- Request *ifetch_req = new Request();
+ RequestPtr ifetch_req = new Request();
ifetch_req->taskId(taskId());
ifetch_req->setContext(thread->contextId());
setupFetchRequest(ifetch_req);
Packet::Command cmd;
// For simplicity, requests are assumed to be 1 byte-sized
- Request *req = new Request(m_address, 1, flags, masterId);
+ RequestPtr req = new Request(m_address, 1, flags, masterId);
//
// Based on the current state, issue a load or a store
Request::Flags flags;
// For simplicity, requests are assumed to be 1 byte-sized
- Request *req = new Request(m_address, 1, flags, masterId);
+ RequestPtr req = new Request(m_address, 1, flags, masterId);
Packet::Command cmd;
bool do_write = (random_mt.random(0, 100) < m_percent_writes);
void
GarnetSyntheticTraffic::completeRequest(PacketPtr pkt)
{
- Request *req = pkt->req;
+ RequestPtr req = pkt->req;
DPRINTF(GarnetSyntheticTraffic,
"Completed injection of %s packet for address %x\n",
//
MemCmd::Command requestType;
- Request *req = nullptr;
+ RequestPtr req = nullptr;
Request::Flags flags;
// Inject in specific Vnet
void
MemTest::completeRequest(PacketPtr pkt, bool functional)
{
- Request *req = pkt->req;
+ RequestPtr req = pkt->req;
assert(req->getSize() == 1);
// this address is no longer outstanding
bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
!uncacheable;
- Request *req = new Request(paddr, 1, flags, masterId);
+ RequestPtr req = new Request(paddr, 1, flags, masterId);
req->setContext(id);
outstandingAddrs.insert(paddr);
}
// Prefetches are assumed to be 0 sized
- Request *req = new Request(m_address, 0, flags,
+ RequestPtr req = new Request(m_address, 0, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
req->setContext(index);
Request::Flags flags;
- Request *req = new Request(m_address, CHECK_SIZE, flags,
+ RequestPtr req = new Request(m_address, CHECK_SIZE, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
Packet::Command cmd;
Addr writeAddr(m_address + m_store_count);
// Stores are assumed to be 1 byte-sized
- Request *req = new Request(writeAddr, 1, flags, m_tester_ptr->masterId(),
+ RequestPtr req = new Request(writeAddr, 1, flags, m_tester_ptr->masterId(),
curTick(), m_pc);
req->setContext(index);
}
// Checks are sized depending on the number of bytes written
- Request *req = new Request(m_address, CHECK_SIZE, flags,
+ RequestPtr req = new Request(m_address, CHECK_SIZE, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
req->setContext(index);
Request::FlagsType flags)
{
// Create new request
- Request *req = new Request(addr, size, flags, masterID);
+ RequestPtr req = new Request(addr, size, flags, masterID);
// Dummy PC to have PC-based prefetchers latch on; get entropy into higher
// bits
req->setPC(((Addr)masterID) << 2);
if (!stride)
break;
- Request *prefetch_req = new Request(0, vaddr + stride * pf *
+ RequestPtr prefetch_req = new Request(0, vaddr + stride * pf *
TheISA::PageBytes,
sizeof(uint8_t), 0,
computeUnit->masterId(),
{
// this is just a request to carry the GPUDynInstPtr
// back and forth
- Request *newRequest = new Request();
+ RequestPtr newRequest = new Request();
newRequest->setPaddr(0x0);
// ReadReq is not evaluated by the LDS but the Packet ctor requires this
}
// set up virtual request
- Request *req = new Request(0, vaddr, size, Request::INST_FETCH,
+ RequestPtr req = new Request(0, vaddr, size, Request::INST_FETCH,
computeUnit->masterId(), 0, 0, 0);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
}
void
- setRequestFlags(Request *req, bool setMemOrder=true)
+ setRequestFlags(RequestPtr req, bool setMemOrder=true)
{
// currently these are the easy scopes to deduce
if (isPrivateSeg()) {
for (ChunkGenerator gen(address, size, cuList.at(cu_id)->cacheLineSize());
!gen.done(); gen.next()) {
- Request *req = new Request(0, gen.addr(), gen.size(), 0,
+ RequestPtr req = new Request(0, gen.addr(), gen.size(), 0,
cuList[0]->masterId(), 0, 0, 0);
doFunctionalAccess(req, cmd, data_buf, suppress_func_errors, cu_id);
void
AbstractMemory::trackLoadLocked(PacketPtr pkt)
{
- Request *req = pkt->req;
+ RequestPtr req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
// first we check if we already have a locked addr for this
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
- Request *req = pkt->req;
+ RequestPtr req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
bool isLLSC = pkt->isLLSC();
static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
// check for matching execution context
- bool matchesContext(Request *req) const
+ bool matchesContext(RequestPtr req) const
{
return (contextId == req->contextId());
}
- LockedAddr(Request *req) : addr(mask(req->getPaddr())),
+ LockedAddr(RequestPtr req) : addr(mask(req->getPaddr())),
contextId(req->contextId())
{}
// this method must be called on *all* stores since even
// non-conditional stores must clear any matching lock addresses.
bool writeOK(PacketPtr pkt) {
- Request *req = pkt->req;
+ RequestPtr req = pkt->req;
if (lockedAddrList.empty()) {
// no locked addrs: nothing to check, store_conditional fails
bool isLLSC = pkt->isLLSC();
writebacks[Request::wbMasterId]++;
- Request *req = new Request(regenerateBlkAddr(blk), blkSize, 0,
+ RequestPtr req = new Request(regenerateBlkAddr(blk), blkSize, 0,
Request::wbMasterId);
if (blk->isSecure())
req->setFlags(Request::SECURE);
PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
- Request *req = new Request(regenerateBlkAddr(blk), blkSize, 0,
+ RequestPtr req = new Request(regenerateBlkAddr(blk), blkSize, 0,
Request::wbMasterId);
if (blk->isSecure()) {
req->setFlags(Request::SECURE);
assert(!writebackClean);
assert(blk && blk->isValid() && !blk->isDirty());
// Creating a zero sized write, a message to the snoop filter
- Request *req =
+ RequestPtr req =
new Request(regenerateBlkAddr(blk), blkSize, 0,
Request::wbMasterId);
if (blk->isSecure())
}
/* Create a prefetch memory request */
- Request *pf_req =
+ RequestPtr pf_req =
new Request(pf_info.first, blkSize, 0, masterId);
if (is_secure) {
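All of these hunks assume RequestPtr is still a plain alias for a raw Request pointer (presumably the typedef in gem5's src/mem/request.hh), something like:

    // Assumed current definition; not part of this change.
    class Request;
    typedef Request *RequestPtr;

That is why assigning the result of new Request(...), or the address of a statically allocated request such as &data_read_req, to a RequestPtr compiles unchanged here. Converting the call sites first lets the alias be redefined later (for example as a smart pointer) without revisiting this code.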