parser.add_option("--stats-root", action="append", default=[], help=
"If given, dump only stats of objects under the given SimObject. "
"SimObjects are identified with Python notation as in: "
- "system.cpu[0].dtb. All elements of an array can be selected at "
- "once with: system.cpu[:].dtb. If given multiple times, dump stats "
+ "system.cpu[0].mmu. All elements of an array can be selected at "
+ "once with: system.cpu[:].mmu. If given multiple times, dump stats "
"that are present under any of the roots. If not given, dump all "
"stats. "
)
-# Copyright (c) 2014-2017 ARM Limited
+# Copyright (c) 2014-2017, 2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
class HPI_ITB(ArmITB):
size = 256
+
+class HPI_MMU(ArmMMU):
+ itb = HPI_ITB()
+ dtb = HPI_DTB()
+
class HPI_WalkCache(Cache):
data_latency = 4
tag_latency = 4
branchPred = HPI_BP()
- itb = HPI_ITB()
- dtb = HPI_DTB()
+ mmu = HPI_MMU()
__all__ = [
"HPI_BP",
system.cpu[i].interrupts[0].int_master = system.piobus.slave
system.cpu[i].interrupts[0].int_slave = system.piobus.master
if fast_forward:
- system.cpu[i].itb.walker.port = ruby_port.slave
- system.cpu[i].dtb.walker.port = ruby_port.slave
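+                # connect both the ITB and DTB walkers to the same ruby port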
+ system.cpu[i].mmu.connectWalkerPorts(
+ ruby_port.slave, ruby_port.slave)
# attach CU ports to Ruby
# Because of the peculiarities of the CP core, you may have 1 CPU but 2
int_cls = ArmPPI if pint < 32 else ArmSPI
for isa in cpu.isa:
isa.pmu = ArmPMU(interrupt=int_cls(num=pint))
- isa.pmu.addArchEvents(cpu=cpu, itb=cpu.itb, dtb=cpu.dtb,
+ isa.pmu.addArchEvents(cpu=cpu,
+ itb=cpu.mmu.itb, dtb=cpu.mmu.dtb,
icache=getattr(cpu, 'icache', None),
dcache=getattr(cpu, 'dcache', None),
l2cache=getattr(self, 'l2', None))
cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
- if buildEnv['TARGET_ISA'] in ("x86", "arm"):
- cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
- cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
+ cpu.mmu.connectWalkerPorts(
+ test_sys.ruby._cpu_ports[i].slave,
+ test_sys.ruby._cpu_ports[i].slave)
if buildEnv['TARGET_ISA'] in "x86":
cpu.interrupts[0].pio = test_sys.ruby._cpu_ports[i].master
system.cpu[i].interrupts[0].pio = ruby_port.master
system.cpu[i].interrupts[0].int_master = ruby_port.slave
system.cpu[i].interrupts[0].int_slave = ruby_port.master
- system.cpu[i].itb.walker.port = ruby_port.slave
- system.cpu[i].dtb.walker.port = ruby_port.slave
+ system.cpu[i].mmu.connectWalkerPorts(
+ ruby_port.slave, ruby_port.slave)
else:
MemClass = Simulation.setMemClass(options)
system.membus = SystemXBar()
for i,cpu in enumerate(cpus):
cpu.icache_port = self.sequencers[i].slave
cpu.dcache_port = self.sequencers[i].slave
+ cpu.mmu.connectWalkerPorts(
+ self.sequencers[i].slave, self.sequencers[i].slave)
isa = buildEnv['TARGET_ISA']
if isa == 'x86':
cpu.interrupts[0].pio = self.sequencers[i].master
cpu.interrupts[0].int_master = self.sequencers[i].slave
cpu.interrupts[0].int_slave = self.sequencers[i].master
- if isa == 'x86' or isa == 'arm':
- cpu.itb.walker.port = self.sequencers[i].slave
- cpu.dtb.walker.port = self.sequencers[i].slave
class L1Cache(L1Cache_Controller):
for i,cpu in enumerate(cpus):
cpu.icache_port = self.sequencers[i].slave
cpu.dcache_port = self.sequencers[i].slave
+ cpu.mmu.connectWalkerPorts(
+ self.sequencers[i].slave, self.sequencers[i].slave)
isa = buildEnv['TARGET_ISA']
if isa == 'x86':
cpu.interrupts[0].pio = self.sequencers[i].master
cpu.interrupts[0].int_master = self.sequencers[i].slave
cpu.interrupts[0].int_slave = self.sequencers[i].master
- if isa == 'x86' or isa == 'arm':
- cpu.itb.walker.port = self.sequencers[i].slave
- cpu.dtb.walker.port = self.sequencers[i].slave
class L1Cache(L1Cache_Controller):
cxx_header = 'arch/arm/mmu.hh'
itb = ArmITB()
dtb = ArmDTB()
+
+ @classmethod
+ def walkerPorts(cls):
+ return ["mmu.itb.walker.port", "mmu.dtb.walker.port"]
+
+ def connectWalkerPorts(self, iport, dport):
+ self.itb.walker.port = iport
+ self.dtb.walker.port = dport
{
CortexA76TC::CortexA76TC(::BaseCPU *cpu, int id, System *system,
- ::BaseTLB *dtb, ::BaseTLB *itb, ::BaseISA *isa,
+ ::BaseMMU *mmu, ::BaseISA *isa,
iris::IrisConnectionInterface *iris_if,
const std::string &iris_path) :
- ThreadContext(cpu, id, system, dtb, itb, isa, iris_if, iris_path)
+ ThreadContext(cpu, id, system, mmu, isa, iris_if, iris_path)
{}
bool
public:
CortexA76TC(::BaseCPU *cpu, int id, System *system,
- ::BaseTLB *dtb, ::BaseTLB *itb, ::BaseISA *isa,
+ ::BaseMMU *mmu, ::BaseISA *isa,
iris::IrisConnectionInterface *iris_if,
const std::string &iris_path);
thread_paths = VectorParam.String(
"Sub-paths to elements in the EVS which support a thread context")
- dtb = IrisTLB()
- itb = IrisTLB()
+ mmu = IrisMMU()
def createThreads(self):
if len(self.isa) == 0:
for (const std::string &sub_path: params.thread_paths) {
std::string path = parent_path + "." + sub_path;
auto id = thread_id++;
- auto *tc = new TC(this, id, sys, params.dtb, params.itb,
+ auto *tc = new TC(this, id, sys, params.mmu,
params.isa[id], iris_if, path);
threadContexts.push_back(tc);
}
}
ThreadContext::ThreadContext(
- BaseCPU *cpu, int id, System *system, ::BaseTLB *dtb, ::BaseTLB *itb,
+ BaseCPU *cpu, int id, System *system, ::BaseMMU *mmu,
BaseISA *isa, iris::IrisConnectionInterface *iris_if,
const std::string &iris_path) :
- _cpu(cpu), _threadId(id), _system(system), _dtb(dtb), _itb(itb), _isa(isa),
+ _cpu(cpu), _threadId(id), _system(system), _mmu(mmu), _isa(isa),
_irisPath(iris_path), vecRegs(ArmISA::NumVecRegs),
vecPredRegs(ArmISA::NumVecPredRegs),
comInstEventQueue("instruction-based event queue"),
int _threadId;
ContextID _contextId;
System *_system;
- ::BaseTLB *_dtb;
- ::BaseTLB *_itb;
+ ::BaseMMU *_mmu;
::BaseISA *_isa;
std::string _irisPath;
public:
ThreadContext(::BaseCPU *cpu, int id, System *system,
- ::BaseTLB *dtb, ::BaseTLB *itb, ::BaseISA *isa,
+ ::BaseMMU *mmu, ::BaseISA *isa,
iris::IrisConnectionInterface *iris_if,
const std::string &iris_path);
virtual ~ThreadContext();
    {
-        return _dtb;
+        return _mmu->dtb;
    }
+ BaseMMU *
+ getMMUPtr() override
+ {
+ return _mmu;
+ }
+
CheckerCPU *getCheckerCpuPtr() override { return nullptr; }
ArmISA::Decoder *
getDecoderPtr() override
cxx_header = "arch/generic/mmu.hh"
itb = Param.BaseTLB("Instruction TLB")
dtb = Param.BaseTLB("Data TLB")
+
+ @classmethod
+ def walkerPorts(cls):
+        # This classmethod is used by the BaseCPU. It should return
+        # a list of strings: the table walker ports to be appended
+        # to the _cached_ports variable. The method should be removed
+        # once we drop the _cached_ports mechanism for composing
+        # cache hierarchies.
+ return []
+
+ def connectWalkerPorts(self, iport, dport):
+ """
+ Connect the instruction and data table walkers
+ to the ports passed as arguments.
+        An ISA-specific MMU should override this method; the
+        default implementation is a no-op so that ISAs without
+        a table walker are still supported.
+
+ :param iport: Port to be connected to the instruction
+ table walker port
+ :param dport: Port to be connected to the data
+ table walker port
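+
+        Both ports may be connected to the same peer, as in the Ruby
+        configs updated in this change::
+
+            system.cpu[i].mmu.connectWalkerPorts(
+                ruby_port.slave, ruby_port.slave)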
+ """
+ pass
cxx_header = 'arch/riscv/mmu.hh'
itb = RiscvTLB()
dtb = RiscvTLB()
+
+ @classmethod
+ def walkerPorts(cls):
+ return ["mmu.itb.walker.port", "mmu.dtb.walker.port"]
+
+ def connectWalkerPorts(self, iport, dport):
+ self.itb.walker.port = iport
+ self.dtb.walker.port = dport
cxx_header = 'arch/x86/mmu.hh'
itb = X86TLB()
dtb = X86TLB()
+
+ @classmethod
+ def walkerPorts(cls):
+ return ["mmu.itb.walker.port", "mmu.dtb.walker.port"]
+
+ def connectWalkerPorts(self, iport, dport):
+ self.itb.walker.port = iport
+ self.dtb.walker.port = dport
default_tracer = ExeTracer()
if buildEnv['TARGET_ISA'] == 'sparc':
- from m5.objects.SparcTLB import SparcTLB as ArchDTB, SparcTLB as ArchITB
+ from m5.objects.SparcMMU import SparcMMU as ArchMMU
from m5.objects.SparcInterrupts import SparcInterrupts as ArchInterrupts
from m5.objects.SparcISA import SparcISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'x86':
- from m5.objects.X86TLB import X86TLB as ArchDTB, X86TLB as ArchITB
+ from m5.objects.X86MMU import X86MMU as ArchMMU
from m5.objects.X86LocalApic import X86LocalApic as ArchInterrupts
from m5.objects.X86ISA import X86ISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'mips':
- from m5.objects.MipsTLB import MipsTLB as ArchDTB, MipsTLB as ArchITB
+ from m5.objects.MipsMMU import MipsMMU as ArchMMU
from m5.objects.MipsInterrupts import MipsInterrupts as ArchInterrupts
from m5.objects.MipsISA import MipsISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'arm':
- from m5.objects.ArmTLB import ArmDTB as ArchDTB, ArmITB as ArchITB
+ from m5.objects.ArmMMU import ArmMMU as ArchMMU
from m5.objects.ArmInterrupts import ArmInterrupts as ArchInterrupts
from m5.objects.ArmISA import ArmISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'power':
- from m5.objects.PowerTLB import PowerTLB as ArchDTB, PowerTLB as ArchITB
+ from m5.objects.PowerMMU import PowerMMU as ArchMMU
from m5.objects.PowerInterrupts import PowerInterrupts as ArchInterrupts
from m5.objects.PowerISA import PowerISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'riscv':
- from m5.objects.RiscvTLB import RiscvTLB as ArchDTB, RiscvTLB as ArchITB
+ from m5.objects.RiscvMMU import RiscvMMU as ArchMMU
from m5.objects.RiscvInterrupts import RiscvInterrupts as ArchInterrupts
from m5.objects.RiscvISA import RiscvISA as ArchISA
else:
workload = VectorParam.Process([], "processes to run")
- dtb = Param.BaseTLB(ArchDTB(), "Data TLB")
- itb = Param.BaseTLB(ArchITB(), "Instruction TLB")
+ mmu = Param.BaseMMU(ArchMMU(), "CPU memory management unit")
if buildEnv['TARGET_ISA'] == 'power':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
interrupts = VectorParam.BaseInterrupts([], "Interrupt Controller")
dcache_port = RequestPort("Data Port")
_cached_ports = ['icache_port', 'dcache_port']
- if buildEnv['TARGET_ISA'] in ['x86', 'arm', 'riscv']:
- _cached_ports += ["itb.walker.port", "dtb.walker.port"]
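+    # For ISAs whose MMU has table walkers (Arm, x86, RISC-V) this appends
+    # e.g. ["mmu.itb.walker.port", "mmu.dtb.walker.port"]; BaseMMU's default
+    # walkerPorts() returns an empty list, so other ISAs are unaffected.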
+ _cached_ports += ArchMMU.walkerPorts()
_uncached_interrupt_response_ports = []
_uncached_interrupt_request_ports = []
if iwc and dwc:
self.itb_walker_cache = iwc
self.dtb_walker_cache = dwc
- self.itb.walker.port = iwc.cpu_side
- self.dtb.walker.port = dwc.cpu_side
+ self.mmu.connectWalkerPorts(
+ iwc.cpu_side, dwc.cpu_side)
self._cached_ports += ["itb_walker_cache.mem_side", \
"dtb_walker_cache.mem_side"]
else:
- self._cached_ports += ["itb.walker.port", "dtb.walker.port"]
+ self._cached_ports += ArchMMU.walkerPorts()
# Checker doesn't need its own tlb caches because it does
# functional accesses only
if self.checker != NULL:
- self._cached_ports += ["checker.itb.walker.port", \
- "checker.dtb.walker.port"]
+        self._cached_ports += ["checker." + port
+            for port in ArchMMU.walkerPorts()]
def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc=None, dwc=None,
xbar=None):
}
void
-BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
+BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseMMU *mmu)
{
assert(tid < numThreads);
AddressMonitor &monitor = addressMonitor[tid];
req->setVirt(addr, size, 0x0, dataRequestorId(), tc->instAddr());
// translate to physical address
- Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
+ Fault fault = mmu->translateAtomic(req, tc, BaseTLB::Read);
assert(fault == NoFault);
monitor.pAddr = req->getPaddr() & mask;
ThreadContext::compare(oldTC, newTC);
*/
- Port *old_itb_port = oldTC->getITBPtr()->getTableWalkerPort();
- Port *old_dtb_port = oldTC->getDTBPtr()->getTableWalkerPort();
- Port *new_itb_port = newTC->getITBPtr()->getTableWalkerPort();
- Port *new_dtb_port = newTC->getDTBPtr()->getTableWalkerPort();
-
- // Move over any table walker ports if they exist
- if (new_itb_port)
- new_itb_port->takeOverFrom(old_itb_port);
- if (new_dtb_port)
- new_dtb_port->takeOverFrom(old_dtb_port);
- newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
- newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());
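+    // A single MMU-level takeOverFrom() now migrates both TLBs and any
+    // table walker ports, replacing the per-TLB hand-over removed above.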
+ newTC->getMMUPtr()->takeOverFrom(oldTC->getMMUPtr());
// Checker whether or not we have to transfer CheckerCPU
// objects over in the switch
- CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
- CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
- if (oldChecker && newChecker) {
- Port *old_checker_itb_port =
- oldChecker->getITBPtr()->getTableWalkerPort();
- Port *old_checker_dtb_port =
- oldChecker->getDTBPtr()->getTableWalkerPort();
- Port *new_checker_itb_port =
- newChecker->getITBPtr()->getTableWalkerPort();
- Port *new_checker_dtb_port =
- newChecker->getDTBPtr()->getTableWalkerPort();
-
- newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
- newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());
-
- // Move over any table walker ports if they exist for checker
- if (new_checker_itb_port)
- new_checker_itb_port->takeOverFrom(old_checker_itb_port);
- if (new_checker_dtb_port)
- new_checker_dtb_port->takeOverFrom(old_checker_dtb_port);
+ CheckerCPU *old_checker = oldTC->getCheckerCpuPtr();
+ CheckerCPU *new_checker = newTC->getCheckerCpuPtr();
+ if (old_checker && new_checker) {
+ new_checker->getMMUPtr()->takeOverFrom(old_checker->getMMUPtr());
}
}
ThreadContext &tc(*threadContexts[i]);
CheckerCPU *checker(tc.getCheckerCpuPtr());
- tc.getITBPtr()->flushAll();
- tc.getDTBPtr()->flushAll();
+ tc.getMMUPtr()->flushAll();
if (checker) {
- checker->getITBPtr()->flushAll();
- checker->getDTBPtr()->flushAll();
+ checker->getMMUPtr()->flushAll();
}
}
}
public:
void armMonitor(ThreadID tid, Addr address);
bool mwait(ThreadID tid, PacketPtr pkt);
- void mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb);
+ void mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseMMU *mmu);
AddressMonitor *getCpuAddrMonitor(ThreadID tid)
{
assert(tid < numThreads);
void
mwaitAtomic(ThreadContext *tc) override
{
- return cpu->mwaitAtomic(threadNumber, tc, cpu->dtb);
+ return cpu->mwaitAtomic(threadNumber, tc, cpu->mmu);
}
AddressMonitor *
getAddrMonitor() override
exitOnError = p.exitOnError;
warnOnlyOnLoadError = p.warnOnlyOnLoadError;
- itb = p.itb;
- dtb = p.dtb;
+ mmu = p.mmu;
workload = p.workload;
updateOnError = true;
systemPtr = system;
if (FullSystem) {
- thread = new SimpleThread(this, 0, systemPtr, itb, dtb, p.isa[0]);
+ thread = new SimpleThread(this, 0, systemPtr, mmu, p.isa[0]);
} else {
thread = new SimpleThread(this, 0, systemPtr,
workload.size() ? workload[0] : NULL,
- itb, dtb, p.isa[0]);
+ mmu, p.isa[0]);
}
tc = thread->getTC();
// translate to physical address
if (predicate) {
- fault = dtb->translateFunctional(mem_req, tc, BaseTLB::Read);
+ fault = mmu->translateFunctional(mem_req, tc, BaseTLB::Read);
}
if (predicate && !checked_flags && fault == NoFault && unverifiedReq) {
predicate = (mem_req != nullptr);
if (predicate) {
- fault = dtb->translateFunctional(mem_req, tc, BaseTLB::Write);
+ fault = mmu->translateFunctional(mem_req, tc, BaseTLB::Write);
}
if (predicate && !checked_flags && fault == NoFault && unverifiedReq) {
ThreadContext *tc;
- BaseTLB *itb;
- BaseTLB *dtb;
+ BaseMMU *mmu;
// ISAs like ARM can have multiple destination registers to check,
// keep them all in a std::queue
// Primary thread being run.
SimpleThread *thread;
- BaseTLB* getITBPtr() { return itb; }
- BaseTLB* getDTBPtr() { return dtb; }
+ BaseTLB* getITBPtr() { return mmu->itb; }
+ BaseTLB* getDTBPtr() { return mmu->dtb; }
+
+ BaseMMU* getMMUPtr() { return mmu; }
virtual Counter totalInsts() const override
{
void
demapPage(Addr vaddr, uint64_t asn) override
{
- this->itb->demapPage(vaddr, asn);
- this->dtb->demapPage(vaddr, asn);
+ mmu->demapPage(vaddr, asn);
}
// monitor/mwait funtions
void armMonitor(Addr address) override { BaseCPU::armMonitor(0, address); }
bool mwait(PacketPtr pkt) override { return BaseCPU::mwait(0, pkt); }
- void mwaitAtomic(ThreadContext *tc) override
- { return BaseCPU::mwaitAtomic(0, tc, thread->dtb); }
+
+ void
+ mwaitAtomic(ThreadContext *tc) override
+ {
+ return BaseCPU::mwaitAtomic(0, tc, thread->mmu);
+ }
+
AddressMonitor *getAddrMonitor() override
{ return BaseCPU::getCpuAddrMonitor(0); }
void
demapInstPage(Addr vaddr, uint64_t asn)
{
- this->itb->demapPage(vaddr, asn);
+ mmu->itb->demapPage(vaddr, asn);
}
void
demapDataPage(Addr vaddr, uint64_t asn)
{
- this->dtb->demapPage(vaddr, asn);
+ mmu->dtb->demapPage(vaddr, asn);
}
/**
Request::INST_FETCH, requestorId,
thread->instAddr());
- fault = itb->translateFunctional(
+ fault = mmu->translateFunctional(
mem_req, tc, BaseTLB::Execute);
if (fault != NoFault) {
BaseTLB *getDTBPtr() override { return actualTC->getDTBPtr(); }
+ BaseMMU *getMMUPtr() override { return actualTC->getMMUPtr(); }
+
CheckerCPU *
getCheckerCpuPtr() override
{
panic("KVM: Failed to determine host page size (%i)\n",
errno);
    if (FullSystem) {
-        thread = new SimpleThread(this, 0, params.system, params.itb,
-                                  params.dtb, params.isa[0]);
+        thread = new SimpleThread(this, 0, params.system, params.mmu,
+                                  params.isa[0]);
    } else {
        thread = new SimpleThread(this, /* thread_num */ 0, params.system,
-                                  params.workload[0], params.itb,
-                                  params.dtb, params.isa[0]);
+                                  params.workload[0], params.mmu,
+                                  params.isa[0]);
    }
thread->setStatus(ThreadContext::Halted);
tc = thread->getTC();
// APIC accesses on x86 and m5ops where supported through a MMIO
// interface.
BaseTLB::Mode tlb_mode(write ? BaseTLB::Write : BaseTLB::Read);
- Fault fault(tc->getDTBPtr()->finalizePhysical(mmio_req, tc, tlb_mode));
+ Fault fault(tc->getMMUPtr()->finalizePhysical(mmio_req, tc, tlb_mode));
if (fault != NoFault)
warn("Finalization of MMIO address failed: %s\n", fault->name());
for (ThreadID i = 0; i < numThreads; i++) {
if (FullSystem) {
thread = new Minor::MinorThread(this, i, params.system,
- params.itb, params.dtb, params.isa[i]);
+ params.mmu, params.isa[i]);
thread->setStatus(ThreadContext::Halted);
} else {
thread = new Minor::MinorThread(this, i, params.system,
- params.workload[i], params.itb, params.dtb,
+ params.workload[i], params.mmu,
params.isa[i]);
}
void
demapPage(Addr vaddr, uint64_t asn) override
{
- thread.getITBPtr()->demapPage(vaddr, asn);
- thread.getDTBPtr()->demapPage(vaddr, asn);
+ thread.getMMUPtr()->demapPage(vaddr, asn);
}
RegVal
public:
// monitor/mwait funtions
- void armMonitor(Addr address) override
- { getCpuPtr()->armMonitor(inst->id.threadId, address); }
+ void
+ armMonitor(Addr address) override
+ {
+ getCpuPtr()->armMonitor(inst->id.threadId, address);
+ }
- bool mwait(PacketPtr pkt) override
- { return getCpuPtr()->mwait(inst->id.threadId, pkt); }
+ bool
+ mwait(PacketPtr pkt) override
+ {
+ return getCpuPtr()->mwait(inst->id.threadId, pkt);
+ }
- void mwaitAtomic(ThreadContext *tc) override
- { return getCpuPtr()->mwaitAtomic(inst->id.threadId, tc, thread.dtb); }
+ void
+ mwaitAtomic(ThreadContext *tc) override
+ {
+ return getCpuPtr()->mwaitAtomic(inst->id.threadId, tc, thread.mmu);
+ }
- AddressMonitor *getAddrMonitor() override
- { return getCpuPtr()->getCpuAddrMonitor(inst->id.threadId); }
+ AddressMonitor *
+ getAddrMonitor() override
+ {
+ return getCpuPtr()->getCpuAddrMonitor(inst->id.threadId);
+ }
};
}
/* Submit the translation request. The response will come
* through finish/markDelayed on this request as it bears
* the Translation interface */
- cpu.threads[request->id.threadId]->itb->translateTiming(
+ cpu.threads[request->id.threadId]->mmu->translateTiming(
request->request,
cpu.getContext(request->id.threadId),
request, BaseTLB::Execute);
/* Submit the translation request. The response will come through
* finish/markDelayed on the LSQRequest as it bears the Translation
* interface */
- thread->getDTBPtr()->translateTiming(
+ thread->getMMUPtr()->translateTiming(
request, thread, this, (isLoad ? BaseTLB::Read : BaseTLB::Write));
} else {
disableMemAccess();
port.numAccessesInDTLB++;
numInTranslationFragments++;
- thread->getDTBPtr()->translateTiming(
+ thread->getMMUPtr()->translateTiming(
fragmentRequests[fragment_index], thread, this, (isLoad ?
BaseTLB::Read : BaseTLB::Write));
}
def addCheckerCpu(self):
if buildEnv['TARGET_ISA'] in ['arm']:
- from m5.objects.ArmTLB import ArmDTB, ArmITB
+            from m5.objects.ArmMMU import ArmMMU
self.checker = O3Checker(workload=self.workload,
exitOnError=False,
updateOnError=True,
warnOnlyOnLoadError=True)
- self.checker.itb = ArmITB(size = self.itb.size)
- self.checker.dtb = ArmDTB(size = self.dtb.size)
+ self.checker.mmu = ArmMMU()
+ self.checker.mmu.itb.size = self.mmu.itb.size
+ self.checker.mmu.dtb.size = self.mmu.dtb.size
self.checker.cpu_id = self.cpu_id
else:
template <class Impl>
FullO3CPU<Impl>::FullO3CPU(const DerivO3CPUParams ¶ms)
: BaseO3CPU(params),
- itb(params.itb),
- dtb(params.dtb),
+ mmu(params.mmu),
tickEvent([this]{ tick(); }, "FullO3CPU tick",
false, Event::CPU_Tick_Pri),
threadExitEvent([this]{ exitThreads(); }, "FullO3CPU exit threads",
SwitchedOut
};
- BaseTLB *itb;
- BaseTLB *dtb;
+ BaseMMU *mmu;
using LSQRequest = typename LSQ<Impl>::LSQRequest;
/** Overall CPU status. */
/** Register probe points. */
void regProbePoints() override;
- void demapPage(Addr vaddr, uint64_t asn)
+ void
+ demapPage(Addr vaddr, uint64_t asn)
{
- this->itb->demapPage(vaddr, asn);
- this->dtb->demapPage(vaddr, asn);
+ mmu->demapPage(vaddr, asn);
}
void demapInstPage(Addr vaddr, uint64_t asn)
{
- this->itb->demapPage(vaddr, asn);
+ mmu->itb->demapPage(vaddr, asn);
}
void demapDataPage(Addr vaddr, uint64_t asn)
{
- this->dtb->demapPage(vaddr, asn);
+ mmu->dtb->demapPage(vaddr, asn);
}
/** Ticks CPU, calling tick() on each stage, and checking the overall
// Initiate translation of the icache block
fetchStatus[tid] = ItlbWait;
FetchTranslation *trans = new FetchTranslation(this);
- cpu->itb->translateTiming(mem_req, cpu->thread[tid]->getTC(),
+ cpu->mmu->translateTiming(mem_req, cpu->thread[tid]->getTC(),
trans, BaseTLB::Execute);
return true;
}
LSQ<Impl>::LSQRequest::sendFragmentToTranslation(int i)
{
numInTranslationFragments++;
- _port.dTLB()->translateTiming(
+ _port.getMMUPtr()->translateTiming(
this->request(i),
this->_inst->thread->getTC(), this,
this->isLoad() ? BaseTLB::Read : BaseTLB::Write);
/** Schedule event for the cpu. */
void schedule(Event& ev, Tick when) { cpu->schedule(ev, when); }
- BaseTLB* dTLB() { return cpu->dtb; }
+ BaseMMU* getMMUPtr() { return cpu->mmu; }
private:
/** Pointer to the CPU. */
O3ThreadState<Impl> *thread;
/** Returns a pointer to the ITB. */
- BaseTLB *getITBPtr() override { return cpu->itb; }
+ BaseTLB *getITBPtr() override { return cpu->mmu->itb; }
/** Returns a pointer to the DTB. */
- BaseTLB *getDTBPtr() override { return cpu->dtb; }
+ BaseTLB *getDTBPtr() override { return cpu->mmu->dtb; }
+
+ /** Returns a pointer to the MMU. */
+ BaseMMU *getMMUPtr() override { return cpu->mmu; }
CheckerCPU *getCheckerCpuPtr() override { return NULL; }
def addCheckerCpu(self):
if buildEnv['TARGET_ISA'] in ['arm']:
- from m5.objects.ArmTLB import ArmITB, ArmDTB
+            from m5.objects.ArmMMU import ArmMMU
self.checker = DummyChecker(workload = self.workload)
- self.checker.itb = ArmITB(size = self.itb.size)
- self.checker.dtb = ArmDTB(size = self.dtb.size)
+ self.checker.mmu = ArmMMU()
+ self.checker.mmu.itb.size = self.mmu.itb.size
+ self.checker.mmu.dtb.size = self.mmu.dtb.size
else:
print("ERROR: Checker only supported under ARM ISA!")
exit(1)
// translate to physical address
if (predicate) {
- fault = thread->dtb->translateAtomic(req, thread->getTC(),
+ fault = thread->mmu->translateAtomic(req, thread->getTC(),
BaseTLB::Read);
}
// translate to physical address
if (predicate)
- fault = thread->dtb->translateAtomic(req, thread->getTC(),
+ fault = thread->mmu->translateAtomic(req, thread->getTC(),
BaseTLB::Write);
// Now do the access.
thread->pcState().instAddr(), std::move(amo_op));
// translate to physical address
- Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
- BaseTLB::Write);
+ Fault fault = thread->mmu->translateAtomic(
+ req, thread->getTC(), BaseTLB::Write);
// Now do the access.
if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
if (needToFetch) {
ifetch_req->taskId(taskId());
setupFetchRequest(ifetch_req);
- fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
+ fault = thread->mmu->translateAtomic(ifetch_req, thread->getTC(),
BaseTLB::Execute);
}
for (unsigned i = 0; i < numThreads; i++) {
if (FullSystem) {
- thread = new SimpleThread(this, i, p.system,
- p.itb, p.dtb, p.isa[i]);
+ thread = new SimpleThread(
+ this, i, p.system, p.mmu, p.isa[i]);
} else {
- thread = new SimpleThread(this, i, p.system, p.workload[i],
- p.itb, p.dtb, p.isa[i]);
+ thread = new SimpleThread(
+ this, i, p.system, p.workload[i], p.mmu, p.isa[i]);
}
threadInfo.push_back(new SimpleExecContext(this, thread));
ThreadContext *tc = thread->getTC();
void
mwaitAtomic(ThreadContext *tc) override
{
- cpu->mwaitAtomic(thread->threadId(), tc, thread->dtb);
+ cpu->mwaitAtomic(thread->threadId(), tc, thread->mmu);
}
AddressMonitor *
DataTranslation<TimingSimpleCPU *> *trans2 =
new DataTranslation<TimingSimpleCPU *>(this, state, 1);
- thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
- thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
+ thread->mmu->translateTiming(req1, thread->getTC(), trans1, mode);
+ thread->mmu->translateTiming(req2, thread->getTC(), trans2, mode);
} else {
WholeTranslationState *state =
new WholeTranslationState(req, new uint8_t[size], NULL, mode);
DataTranslation<TimingSimpleCPU *> *translation
= new DataTranslation<TimingSimpleCPU *>(this, state);
- thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
+ thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
}
return NoFault;
DataTranslation<TimingSimpleCPU *> *trans2 =
new DataTranslation<TimingSimpleCPU *>(this, state, 1);
- thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
- thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
+ thread->mmu->translateTiming(req1, thread->getTC(), trans1, mode);
+ thread->mmu->translateTiming(req2, thread->getTC(), trans2, mode);
} else {
WholeTranslationState *state =
new WholeTranslationState(req, newData, res, mode);
DataTranslation<TimingSimpleCPU *> *translation =
new DataTranslation<TimingSimpleCPU *>(this, state);
- thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
+ thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
}
// Translation faults will be returned via finishTranslation()
new WholeTranslationState(req, new uint8_t[size], NULL, mode);
DataTranslation<TimingSimpleCPU *> *translation
= new DataTranslation<TimingSimpleCPU *>(this, state);
- thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
+ thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
return NoFault;
}
ifetch_req->setContext(thread->contextId());
setupFetchRequest(ifetch_req);
DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
- thread->itb->translateTiming(ifetch_req, thread->getTC(),
+ thread->mmu->translateTiming(ifetch_req, thread->getTC(),
&fetchTranslation, BaseTLB::Execute);
} else {
_status = IcacheWaitResponse;
// constructor
SimpleThread::SimpleThread(BaseCPU *_cpu, int _thread_num, System *_sys,
- Process *_process, BaseTLB *_itb,
- BaseTLB *_dtb, BaseISA *_isa)
+ Process *_process, BaseMMU *_mmu,
+ BaseISA *_isa)
: ThreadState(_cpu, _thread_num, _process),
isa(dynamic_cast<TheISA::ISA *>(_isa)),
predicate(true), memAccPredicate(true),
comInstEventQueue("instruction-based event queue"),
- system(_sys), itb(_itb), dtb(_dtb), decoder(isa),
+ system(_sys), mmu(_mmu), decoder(isa),
htmTransactionStarts(0), htmTransactionStops(0)
{
assert(isa);
}
SimpleThread::SimpleThread(BaseCPU *_cpu, int _thread_num, System *_sys,
- BaseTLB *_itb, BaseTLB *_dtb, BaseISA *_isa)
+ BaseMMU *_mmu, BaseISA *_isa)
: ThreadState(_cpu, _thread_num, NULL),
isa(dynamic_cast<TheISA::ISA *>(_isa)),
predicate(true), memAccPredicate(true),
comInstEventQueue("instruction-based event queue"),
- system(_sys), itb(_itb), dtb(_dtb), decoder(isa),
+ system(_sys), mmu(_mmu), decoder(isa),
htmTransactionStarts(0), htmTransactionStops(0)
{
assert(isa);
#include "arch/decoder.hh"
#include "arch/generic/htm.hh"
+#include "arch/generic/mmu.hh"
#include "arch/generic/tlb.hh"
#include "arch/isa.hh"
#include "arch/registers.hh"
System *system;
- BaseTLB *itb;
- BaseTLB *dtb;
+ BaseMMU *mmu;
TheISA::Decoder decoder;
// constructor: initialize SimpleThread from given process structure
// FS
SimpleThread(BaseCPU *_cpu, int _thread_num, System *_system,
- BaseTLB *_itb, BaseTLB *_dtb, BaseISA *_isa);
+ BaseMMU *_mmu, BaseISA *_isa);
// SE
SimpleThread(BaseCPU *_cpu, int _thread_num, System *_system,
- Process *_process, BaseTLB *_itb, BaseTLB *_dtb,
+ Process *_process, BaseMMU *_mmu,
BaseISA *_isa);
virtual ~SimpleThread() {}
*/
ThreadContext *getTC() { return this; }
- void demapPage(Addr vaddr, uint64_t asn)
+ void
+ demapPage(Addr vaddr, uint64_t asn)
{
- itb->demapPage(vaddr, asn);
- dtb->demapPage(vaddr, asn);
+ mmu->demapPage(vaddr, asn);
}
void demapInstPage(Addr vaddr, uint64_t asn)
{
- itb->demapPage(vaddr, asn);
+ mmu->itb->demapPage(vaddr, asn);
}
void demapDataPage(Addr vaddr, uint64_t asn)
{
- dtb->demapPage(vaddr, asn);
+ mmu->dtb->demapPage(vaddr, asn);
}
/*******************************************
ContextID contextId() const override { return ThreadState::contextId(); }
void setContextId(ContextID id) override { ThreadState::setContextId(id); }
-    BaseTLB *getITBPtr() override { return itb; }
-    BaseTLB *getDTBPtr() override { return dtb; }
+    BaseTLB *getITBPtr() override { return mmu->itb; }
+
+    BaseTLB *getDTBPtr() override { return mmu->dtb; }
+
+    BaseMMU *getMMUPtr() override { return mmu; }
CheckerCPU *getCheckerCpuPtr() override { return NULL; }
class Decoder;
}
class BaseCPU;
+class BaseMMU;
class BaseTLB;
class CheckerCPU;
class Checkpoint;
virtual BaseTLB *getDTBPtr() = 0;
+ virtual BaseMMU *getMMUPtr() = 0;
+
virtual CheckerCPU *getCheckerCpuPtr() = 0;
virtual BaseISA *getIsaPtr() = 0;
# Tie the cpu ports to the correct ruby system ports
cpu.icache_port = system.ruby._cpu_ports[i].slave
cpu.dcache_port = system.ruby._cpu_ports[i].slave
- cpu.itb.walker.port = system.ruby._cpu_ports[i].slave
- cpu.dtb.walker.port = system.ruby._cpu_ports[i].slave
+ cpu.mmu.connectWalkerPorts(
+ system.ruby._cpu_ports[i].slave, system.ruby._cpu_ports[i].slave)
cpu.interrupts[0].pio = system.ruby._cpu_ports[i].master
cpu.interrupts[0].int_master = system.ruby._cpu_ports[i].slave
"""
self.mmubus = L2XBar()
self.cpu_side = self.mmubus.master
- for tlb in [cpu.itb, cpu.dtb]:
- self.mmubus.slave = tlb.walker.port
+ cpu.mmu.connectWalkerPorts(
+ self.mmubus.slave, self.mmubus.slave)
def connectBus(self, bus):
"""Connect this cache to a memory-side bus"""