case MISCREG_ATS1HR:
case MISCREG_ATS1HW:
{
- RequestPtr req = new Request;
unsigned flags = 0;
BaseTLB::Mode mode = BaseTLB::Read;
TLB::ArmTranslationType tranType = TLB::NormalTran;
// can't be an atomic translation because that causes problems
// with unexpected atomic snoop requests.
warn("Translating via MISCREG(%d) in functional mode! Fix Me!\n", misc_reg);
- req->setVirt(0, val, 1, flags, Request::funcMasterId,
- tc->pcState().pc());
- req->setThreadContext(tc->contextId(), tc->threadId());
- fault = tc->getDTBPtr()->translateFunctional(req, tc, mode, tranType);
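+ // The request can live on the stack: the functional translation
+ // below completes synchronously, so no explicit delete is needed.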
+ Request req(0, val, 1, flags, Request::funcMasterId,
+ tc->pcState().pc(), tc->contextId(),
+ tc->threadId());
+ fault = tc->getDTBPtr()->translateFunctional(&req, tc, mode, tranType);
TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
HCR hcr = readMiscRegNoEffect(MISCREG_HCR);
MiscReg newVal;
if (fault == NoFault) {
- Addr paddr = req->getPaddr();
+ Addr paddr = req.getPaddr();
if (haveLPAE && (ttbcr.eae || tranType & TLB::HypMode ||
((tranType & TLB::S1S2NsTran) && hcr.vm) )) {
newVal = (paddr & mask(39, 12)) |
"MISCREG: Translated addr 0x%08x fault fsr %#x: PAR: 0x%08x\n",
val, fsr, newVal);
}
- delete req;
setMiscRegNoEffect(MISCREG_PAR, newVal);
return;
}
// Need to account for multiple accesses, like the Atomic and TimingSimple CPUs do
while (1) {
- memReq = new Request();
- memReq->setVirt(0, addr, size, flags, masterId, thread->pcState().instAddr());
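+ // Build the request with the virtual-address fields; the context
+ // and thread IDs are now passed directly to the constructor.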
+ memReq = new Request(0, addr, size, flags, masterId,
+ thread->pcState().instAddr(), tc->contextId(), 0);
// translate to physical address
fault = dtb->translateFunctional(memReq, tc, BaseTLB::Read);
// Need to account for multiple accesses, like the Atomic and Timing CPUs do
while (1) {
- memReq = new Request();
- memReq->setVirt(0, addr, size, flags, masterId, thread->pcState().instAddr());
+ memReq = new Request(0, addr, size, flags, masterId,
+ thread->pcState().instAddr(), tc->contextId(), 0);
// translate to physical address
fault = dtb->translateFunctional(memReq, tc, BaseTLB::Write);
// initialize CPU, including PC
if (FullSystem && !switchedOut())
TheISA::initCPU(tc, tc->contextId());
-
- mmio_req.setThreadContext(tc->contextId(), 0);
}
void
ThreadContext *tc(thread->getTC());
syncThreadContext();
- mmio_req.setPhys(paddr, size, Request::UNCACHEABLE, dataMasterId());
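+ // Build the MMIO request on the stack for this access instead of
+ // reusing a pre-allocated member request.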
+ Request mmio_req(paddr, size, Request::UNCACHEABLE, dataMasterId());
+ mmio_req.setThreadContext(tc->contextId(), 0);
// Some architectures do need to massage physical addresses a bit
// before they are inserted into the memory system. This enables
// APIC accesses on x86 and m5ops where supported through a MMIO
/** Unused dummy port for the instruction interface */
KVMCpuPort instPort;
- /** Pre-allocated MMIO memory request */
- Request mmio_req;
-
/**
* Is the gem5 context dirty? Set to true to force an update of
* the KVM vCPU state upon the next call to kvmRun().
updateCPUID();
- io_req.setThreadContext(tc->contextId(), 0);
-
// TODO: Do we need to create an identity mapped TSS area? We
// should call kvm.vm.setTSSAddress() here in that case. It should
// only be needed for old versions of the virtualization
pAddr = X86ISA::x86IOAddress(port);
}
- io_req.setPhys(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
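+ // The IO request is likewise constructed per access rather than
+ // being reused across accesses.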
+ Request io_req(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
dataMasterId());
+ io_req.setThreadContext(tc->contextId(), 0);
const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
// Temporarily lock and migrate to the event queue of the
*/
void handleIOMiscReg32(int miscreg);
- /** Reusable IO request */
- Request io_req;
-
/** Cached intersection of supported MSRs */
mutable Kvm::MSRIndexVector cachedMsrIntersection;
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
bool read)
{
- PacketPtr pkt;
- buildPacket(pkt, req, read);
+ PacketPtr pkt = buildPacket(req, read);
pkt->dataDynamic<uint8_t>(data);
if (req->getFlags().isSet(Request::NO_ACCESS)) {
assert(!dcache_pkt);
advanceInst(fault);
}
-void
-TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
+PacketPtr
+TimingSimpleCPU::buildPacket(RequestPtr req, bool read)
{
- pkt = read ? Packet::createRead(req) : Packet::createWrite(req);
+ return read ? Packet::createRead(req) : Packet::createWrite(req);
}
void
assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());
if (req->getFlags().isSet(Request::NO_ACCESS)) {
- buildPacket(pkt1, req, read);
+ pkt1 = buildPacket(req, read);
return;
}
- buildPacket(pkt1, req1, read);
- buildPacket(pkt2, req2, read);
+ pkt1 = buildPacket(req1, read);
+ pkt2 = buildPacket(req2, read);
- req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());
pkt->dataDynamic<uint8_t>(data);
void translationFault(const Fault &fault);
- void buildPacket(PacketPtr &pkt, RequestPtr req, bool read);
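+ /** Build a read or write packet for the given request and return it. */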
+ PacketPtr buildPacket(RequestPtr req, bool read);
void buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
RequestPtr req1, RequestPtr req2, RequestPtr req,
uint8_t *data, bool read);
bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
!uncacheable;
- Request *req = new Request();
+ Request *req = nullptr;
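+ // The request itself is allocated below, once the access type (DMA
+ // or regular) and therefore its size are known.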
uint8_t *result = new uint8_t[8];
if (issueDmas) {
paddr &= ~((1 << dma_access_size) - 1);
- req->setPhys(paddr, 1 << dma_access_size, flags, masterId);
+ req = new Request(paddr, 1 << dma_access_size, flags, masterId);
req->setThreadContext(id,0);
} else {
paddr &= ~((1 << access_size) - 1);
- req->setPhys(paddr, 1 << access_size, flags, masterId);
+ req = new Request(paddr, 1 << access_size, flags, masterId);
req->setThreadContext(id,0);
}
assert(req->getSize() == 1);
destination = dest_y*networkDimension + dest_x;
}
- Request *req = new Request();
- Request::Flags flags;
-
// The source of the packets is a cache.
// The destination of the packets is a directory.
// The destination bits are embedded in the address after byte-offset.
//
MemCmd::Command requestType;
+ Request *req = nullptr;
+ Request::Flags flags;
+
unsigned randomReqType = random_mt.random(0, 2);
if (randomReqType == 0) {
// generate packet for virtual network 0
requestType = MemCmd::ReadReq;
- req->setPhys(paddr, access_size, flags, masterId);
+ req = new Request(paddr, access_size, flags, masterId);
} else if (randomReqType == 1) {
// generate packet for virtual network 1
requestType = MemCmd::ReadReq;
flags.set(Request::INST_FETCH);
- req->setVirt(0, 0x0, access_size, flags, 0x0, masterId);
+ req = new Request(0, 0x0, access_size, flags, masterId, 0x0, 0, 0);
req->setPaddr(paddr);
} else { // if (randomReqType == 2)
// generate packet for virtual network 2
requestType = MemCmd::WriteReq;
- req->setPhys(paddr, access_size, flags, masterId);
+ req = new Request(paddr, access_size, flags, masterId);
}
req->setThreadContext(id,0);
void
PortProxy::readBlob(Addr addr, uint8_t *p, int size) const
{
- Request req;
-
for (ChunkGenerator gen(addr, size, _cacheLineSize); !gen.done();
gen.next()) {
- req.setPhys(gen.addr(), gen.size(), 0, Request::funcMasterId);
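+ // One request per chunk; the functional access completes
+ // immediately, so the request can live on the stack.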
+ Request req(gen.addr(), gen.size(), 0, Request::funcMasterId);
Packet pkt(&req, MemCmd::ReadReq);
pkt.dataStatic(p);
_port.sendFunctional(&pkt);
void
PortProxy::writeBlob(Addr addr, const uint8_t *p, int size) const
{
- Request req;
-
for (ChunkGenerator gen(addr, size, _cacheLineSize); !gen.done();
gen.next()) {
- req.setPhys(gen.addr(), gen.size(), 0, Request::funcMasterId);
+ Request req(gen.addr(), gen.size(), 0, Request::funcMasterId);
Packet pkt(&req, MemCmd::WriteReq);
pkt.dataStaticConst(p);
_port.sendFunctional(&pkt);
VALID_CONTEXT_ID | VALID_THREAD_ID;
private:
+
+ /**
+ * Set up a physical (e.g. device) request in a previously
+ * allocated Request object.
+ */
+ void
+ setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
+ {
+ assert(size >= 0);
+ _paddr = paddr;
+ _size = size;
+ _time = time;
+ _masterId = mid;
+ _flags.clear(~STICKY_FLAGS);
+ _flags.set(flags);
+ privateFlags.clear(~STICKY_PRIVATE_FLAGS);
+ privateFlags.set(VALID_PADDR|VALID_SIZE);
+ depth = 0;
+ accessDelta = 0;
+ //translateDelta = 0;
+ }
+
/**
* The physical address of the request. Valid only if validPaddr
* is set.
* paddr is written via setVirt() or setPhys(), so it is always
* valid as long as one of the address fields is valid.
*/
- int _size;
+ unsigned _size;
/** The requestor ID which is unique in the system for all ports
* that are capable of issuing a transaction
Addr _pc;
public:
- /** Minimal constructor. No fields are initialized.
- * (Note that _flags and privateFlags are cleared by Flags
- * default constructor.)
+
+ /**
+ * Minimal constructor. No fields are initialized. (Note that
+ * _flags and privateFlags are cleared by Flags default
+ * constructor.)
*/
Request()
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
/**
* Constructor for physical (e.g. device) requests. Initializes
* just physical address, size, flags, and timestamp (to curTick()).
- * These fields are adequate to perform a request.
+ * These fields are adequate to perform a request.
*/
- Request(Addr paddr, int size, Flags flags, MasterID mid)
+ Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
_extraData(0), _contextId(0), _threadId(0), _pc(0),
translateDelta(0), accessDelta(0), depth(0)
{
- setPhys(paddr, size, flags, mid);
+ setPhys(paddr, size, flags, mid, curTick());
}
- Request(Addr paddr, int size, Flags flags, MasterID mid, Tick time)
+ Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
_extraData(0), _contextId(0), _threadId(0), _pc(0),
setPhys(paddr, size, flags, mid, time);
}
- Request(Addr paddr, int size, Flags flags, MasterID mid, Tick time, Addr pc)
+ Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
+ Addr pc)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
_extraData(0), _contextId(0), _threadId(0), _pc(0),
_pc = pc;
}
- Request(int asid, Addr vaddr, int size, Flags flags, MasterID mid, Addr pc,
+ Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
+ Addr pc,
int cid, ThreadID tid)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
privateFlags.set(VALID_CONTEXT_ID|VALID_THREAD_ID);
}
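+
+ /**
+ * Note: callers are now expected to construct requests directly with
+ * the constructors above, e.g. (illustrative only):
+ *
+ *   Request phys_req(paddr, size, flags, masterId);
+ *   Request virt_req(asid, vaddr, size, flags, masterId, pc,
+ *                    contextId, threadId);
+ *
+ * rather than default-constructing a Request and calling setPhys()
+ * afterwards; setPhys() is now private. setVirt() remains available
+ * for requests whose virtual address is filled in later.
+ */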
- /**
- * Set up a physical (e.g. device) request in a previously
- * allocated Request object.
- */
- void
- setPhys(Addr paddr, int size, Flags flags, MasterID mid, Tick time)
- {
- assert(size >= 0);
- _paddr = paddr;
- _size = size;
- _time = time;
- _masterId = mid;
- _flags.clear(~STICKY_FLAGS);
- _flags.set(flags);
- privateFlags.clear(~STICKY_PRIVATE_FLAGS);
- privateFlags.set(VALID_PADDR|VALID_SIZE);
- depth = 0;
- accessDelta = 0;
- //translateDelta = 0;
- }
-
- void
- setPhys(Addr paddr, int size, Flags flags, MasterID mid)
- {
- setPhys(paddr, size, flags, mid, curTick());
- }
-
/**
* Set up a virtual (e.g., CPU) request in a previously
* allocated Request object.
*/
void
- setVirt(int asid, Addr vaddr, int size, Flags flags, MasterID mid, Addr pc)
+ setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
+ Addr pc)
{
assert(size >= 0);
_asid = asid;
assert(privateFlags.isSet(VALID_VADDR));
assert(privateFlags.noneSet(VALID_PADDR));
assert(split_addr > _vaddr && split_addr < _vaddr + _size);
- req1 = new Request;
- *req1 = *this;
- req2 = new Request;
- *req2 = *this;
+ req1 = new Request(*this);
+ req2 = new Request(*this);
req1->_size = split_addr - _vaddr;
req2->_vaddr = split_addr;
req2->_size = _size - req1->_size;
for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes;
rec_bytes_read += RubySystem::getBlockSizeBytes()) {
- Request* req = new Request();
+ Request* req = nullptr;
MemCmd::Command requestType;
if (traceRecord->m_type == RubyRequestType_LD) {
requestType = MemCmd::ReadReq;
- req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+ req = new Request(traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
} else if (traceRecord->m_type == RubyRequestType_IFETCH) {
requestType = MemCmd::ReadReq;
- req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+ req = new Request(traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(),
Request::INST_FETCH, Request::funcMasterId);
} else {
requestType = MemCmd::WriteReq;
- req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+ req = new Request(traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
}