TableWalker::drainResume()
{
Drainable::drainResume();
- if ((params()->sys->getMemoryMode() == Enums::timing) && currState) {
+ if (params()->sys->isTimingMode() && currState) {
delete currState;
currState = NULL;
}
message.destMode = low.destMode;
message.level = low.level;
message.trigger = low.trigger;
- bool timing = sys->getMemoryMode() == Enums::timing;
+ bool timing = sys->isTimingMode();
// Be careful that no updates to the delivery status bit get lost.
regs[APIC_INTERRUPT_COMMAND_LOW] = low;
ApicList apics;
// outstanding requests, see if this request can be coalesced with
// another one (i.e. either coalesce or start walk)
WalkerState * newState = new WalkerState(this, _translation, _req);
- newState->initState(_tc, _mode, sys->getMemoryMode() == Enums::timing);
+ newState->initState(_tc, _mode, sys->isTimingMode());
if (currStates.size()) {
assert(newState->isTiming());
DPRINTF(PageTableWalker, "Walks in progress: %d\n", currStates.size());
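The hunk above is truncated; as a sketch only, the decision the comment describes is to coalesce by queueing when a walk is already outstanding, and otherwise start a fresh walk. Here startWalk() stands in for whatever begins the actual walk and is not guaranteed to match the real code:

    if (currStates.size()) {
        // a walk is already in flight; queue behind it so the two
        // requests can be coalesced
        currStates.push_back(newState);
    } else {
        currStates.push_back(newState);
        newState->startWalk(); // hypothetical helper
    }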
void
InOrderCPU::verifyMemoryMode() const
{
- if (system->getMemoryMode() != Enums::timing) {
+ if (!system->isTimingMode()) {
fatal("The in-order CPU requires the memory system to be in "
"'timing' mode.\n");
}
void
FullO3CPU<Impl>::verifyMemoryMode() const
{
- if (system->getMemoryMode() != Enums::timing) {
+ if (!system->isTimingMode()) {
fatal("The O3 CPU requires the memory system to be in "
"'timing' mode.\n");
}
void
AtomicSimpleCPU::verifyMemoryMode() const
{
- if (system->getMemoryMode() != Enums::atomic) {
+ if (!system->isAtomicMode()) {
fatal("The atomic CPU requires the memory system to be in "
"'atomic' mode.\n");
}
void
TimingSimpleCPU::verifyMemoryMode() const
{
- if (system->getMemoryMode() != Enums::timing) {
+ if (!system->isTimingMode()) {
fatal("The timing CPU requires the memory system to be in "
"'timing' mode.\n");
}
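The same hook generalizes to CPU models that need the new non-caching mode. As a sketch (the class name is hypothetical and not part of this patch), a hardware-virtualized CPU would demand both atomic accesses and cache bypassing:

    void
    HypotheticalKvmCPU::verifyMemoryMode() const
    {
        if (!(system->isAtomicMode() && system->bypassCaches())) {
            fatal("This CPU requires the memory system to be in "
                  "'atomic_noncaching' mode.\n");
        }
    }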
if (!port.isConnected())
fatal("The port of %s is not connected!\n", name());
- Enums::MemoryMode mode = system->getMemoryMode();
-
// if the system is in timing mode, activate the request generator
- if (mode == Enums::timing) {
+ if (system->isTimingMode()) {
DPRINTF(TrafficGen, "Timing mode, activating request generator\n");
// enter initial state
TrafficGen::initState()
{
// when not restoring from a checkpoint, make sure we kick things off
- if (system->getMemoryMode() == Enums::timing) {
+ if (system->isTimingMode()) {
Tick nextStateGraphEvent = stateGraph.nextEventTick();
schedule(updateStateGraphEvent, nextStateGraphEvent);
} else {
// switching actually work
assert(transmitList.size());
- Enums::MemoryMode state = sys->getMemoryMode();
- if (state == Enums::timing) {
+ if (sys->isTimingMode()) {
// if we are either waiting for a retry or are still waiting
// after sending the last packet, then do not proceed
if (inRetry || sendEvent.scheduled()) {
}
trySendTimingReq();
- } else if (state == Enums::atomic) {
+ } else if (sys->isAtomicMode()) {
// send everything there is to send in zero time
while (!transmitList.empty()) {
PacketPtr pkt = transmitList.front();
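The atomic branch above is also truncated; a minimal sketch of the zero-time drain it performs, assuming the list holds bare PacketPtrs and the port provides sendAtomic() (both names are assumptions, not the patch's code):

    while (!transmitList.empty()) {
        PacketPtr pkt = transmitList.front();
        transmitList.pop_front();
        port.sendAtomic(pkt); // delivered and completed immediately
    }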
apics.push_back(selected);
}
}
- intMasterPort.sendMessage(apics, message,
- sys->getMemoryMode() == Enums::timing);
+ intMasterPort.sendMessage(apics, message, sys->isTimingMode());
}
}
# Andreas Hansson
from MemObject import MemObject
+from System import System
from m5.params import *
+from m5.proxy import *
class BaseBus(MemObject):
type = 'BaseBus'
class CoherentBus(BaseBus):
type = 'CoherentBus'
cxx_header = "mem/coherent_bus.hh"
+
+ system = Param.System(Parent.any, "System that the bus belongs to.")
// must be cache-to-cache response from upper to lower level
ForwardResponseRecord *rec =
dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
+ assert(!system->bypassCaches());
if (rec == NULL) {
assert(pkt->cmd == MemCmd::HardPFResp);
assert(pkt->isRequest());
+ // Just forward the packet if caches are disabled.
+ if (system->bypassCaches()) {
+ memSidePort->sendTimingReq(pkt);
+ return true;
+ }
+
if (pkt->memInhibitAsserted()) {
DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
pkt->getAddr());
// @TODO: make this a parameter
bool last_level_cache = false;
+ // Forward the request if the system is in cache bypass mode.
+ if (system->bypassCaches())
+ return memSidePort->sendAtomic(pkt);
+
if (pkt->memInhibitAsserted()) {
assert(!pkt->req->isUncacheable());
// have to invalidate ourselves and any lower caches even if
void
Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
+ if (system->bypassCaches()) {
+ // Packets from the memory side are snoop requests and
+ // shouldn't happen in bypass mode.
+ assert(fromCpuSide);
+
+ // The cache should have been flushed when bypass mode was
+ // entered, so there is no cache state to check or update here.
+ memSidePort->sendFunctional(pkt);
+ return;
+ }
+
Addr blk_addr = blockAlign(pkt->getAddr());
BlkType *blk = tags->findBlock(pkt->getAddr());
MSHR *mshr = mshrQueue.findMatch(blk_addr);
void
Cache<TagStore>::snoopTiming(PacketPtr pkt)
{
+ // Snoops shouldn't happen when bypassing caches
+ assert(!system->bypassCaches());
+
// Note that some deferred snoops don't have requests, since the
// original access may have already completed
if ((pkt->req && pkt->req->isUncacheable()) ||
Cycles
Cache<TagStore>::snoopAtomic(PacketPtr pkt)
{
+ // Snoops shouldn't happen when bypassing caches
+ assert(!system->bypassCaches());
+
if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
// Can't get a hit on an uncacheable address
// Revisit this for multi level coherence
{
// always let inhibited requests through even if blocked
if (!pkt->memInhibitAsserted() && blocked) {
+ assert(!cache->system->bypassCaches());
DPRINTF(Cache,"Scheduling a retry while blocked\n");
mustSendRetry = true;
return false;
#include "debug/BusAddrRanges.hh"
#include "debug/CoherentBus.hh"
#include "mem/coherent_bus.hh"
+#include "sim/system.hh"
CoherentBus::CoherentBus(const CoherentBusParams *p)
: BaseBus(p), reqLayer(*this, ".reqLayer", p->clock),
respLayer(*this, ".respLayer", p->clock),
- snoopRespLayer(*this, ".snoopRespLayer", p->clock)
+ snoopRespLayer(*this, ".snoopRespLayer", p->clock),
+ system(p->system)
{
// create the ports based on the size of the master and slave
// vector ports, and the presence of the default port, the ports
Tick packetFinishTime = is_express_snoop ? 0 : pkt->finishTime;
// uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable()) {
+ if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
// the packet is a memory-mapped request and should be
// broadcast to all our snoopers but the source
forwardTiming(pkt, slave_port_id);
void
CoherentBus::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id)
{
+ // snoops should only happen if the system isn't bypassing caches
+ assert(!system->bypassCaches());
+
for (SlavePortIter s = snoopPorts.begin(); s != snoopPorts.end(); ++s) {
SlavePort *p = *s;
// we could have gotten this request from a snooping master
Tick snoop_response_latency = 0;
// uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable()) {
+ if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
// forward to all snoopers but the source
std::pair<MemCmd, Tick> snoop_result =
forwardAtomic(pkt, slave_port_id);
MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
Tick snoop_response_latency = 0;
+ // snoops should only happen if the system isn't bypassing caches
+ assert(!system->bypassCaches());
+
for (SlavePortIter s = snoopPorts.begin(); s != snoopPorts.end(); ++s) {
SlavePort *p = *s;
// we could have gotten this request from a snooping master
}
// uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable()) {
+ if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
// forward to all snoopers but the source
forwardFunctional(pkt, slave_port_id);
}
void
CoherentBus::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
{
+ // snoops should only happen if the system isn't bypassing caches
+ assert(!system->bypassCaches());
+
for (SlavePortIter s = snoopPorts.begin(); s != snoopPorts.end(); ++s) {
SlavePort *p = *s;
// we could have gotten this request from a snooping master
*/
std::set<RequestPtr> outstandingReq;
+ /**
+ * Keep a pointer to the system so we can query memory system
+ * properties.
+ */
+ System *system;
+
/** Function called by the port when the bus is receiving a timing
request packet.*/
virtual bool recvTimingReq(PacketPtr pkt, PortID slave_port_id);
_memory_modes = {
"atomic" : objects.params.atomic,
"timing" : objects.params.timing,
+ "atomic_noncaching" : objects.params.atomic_noncaching,
}
# The final hook to generate .ini files. Called from the user script
# Change the memory mode if required. We check if this is needed
# to avoid printing a warning if no switch was performed.
if system.getMemoryMode() != memory_mode:
+ # Flush the memory system if we are switching to a memory mode
+ # that disables caches. This typically happens when switching to a
+ # hardware virtualized CPU.
+ if memory_mode == objects.params.atomic_noncaching:
+ memWriteback(system)
+ memInvalidate(system)
+
_changeMemoryMode(system, memory_mode)
for old_cpu, new_cpu in cpuList:
from SimpleMemory import *
-class MemoryMode(Enum): vals = ['invalid', 'atomic', 'timing']
+class MemoryMode(Enum): vals = ['invalid', 'atomic', 'timing',
+ 'atomic_noncaching']
class System(MemObject):
type = 'System'
@classmethod
def export_methods(cls, code):
code('''
- Enums::MemoryMode getMemoryMode();
+ Enums::MemoryMode getMemoryMode() const;
void setMemoryMode(Enums::MemoryMode mode);
''')
return masterIds[master_id];
}
-const char *System::MemoryModeStrings[3] = {"invalid", "atomic",
- "timing"};
+const char *System::MemoryModeStrings[4] = {"invalid", "atomic", "timing",
+ "atomic_noncaching"};
System *
SystemParams::create()
BaseMasterPort& getMasterPort(const std::string &if_name,
PortID idx = InvalidPortID);
- static const char *MemoryModeStrings[3];
+ static const char *MemoryModeStrings[4];
- Enums::MemoryMode
- getMemoryMode()
- {
- assert(memoryMode);
- return memoryMode;
+ /** @{ */
+ /**
+ * Is the system in atomic mode?
+ *
+ * There are currently two different atomic memory modes:
+ * 'atomic', which supports caches; and 'atomic_noncaching', which
+ * bypasses caches. The latter is used by hardware virtualized
+ * CPUs. SimObjects are expected to use Port::sendAtomic() and
+ * Port::recvAtomic() when accessing memory in this mode.
+ */
+ bool isAtomicMode() const {
+ return memoryMode == Enums::atomic ||
+ memoryMode == Enums::atomic_noncaching;
}
- /** Change the memory mode of the system. This should only be called by the
- * python!!
- * @param mode Mode to change to (atomic/timing)
+ /**
+ * Is the system in timing mode?
+ *
+ * SimObjects are expected to use Port::sendTiming() and
+ * Port::recvTiming() when accessing memory in this mode.
+ */
+ bool isTimingMode() const {
+ return memoryMode == Enums::timing;
+ }
+
+ /**
+ * Should caches be bypassed?
+ *
+ * Some CPUs need to bypass caches to allow direct memory
+ * accesses, which is required for hardware virtualization.
+ */
+ bool bypassCaches() const {
+ return memoryMode == Enums::atomic_noncaching;
+ }
+ /** @} */
+
+ /** @{ */
+ /**
+ * Get the memory mode of the system.
+ *
+ * \warning This should only be used by the Python world. The C++
+ * world should use one of the query functions above
+ * (isAtomicMode(), isTimingMode(), bypassCaches()).
+ */
+ Enums::MemoryMode getMemoryMode() const { return memoryMode; }
+
+ /**
+ * Change the memory mode of the system.
+ *
+ * \warning This should only be called from Python!
+ *
+ * @param mode Mode to change to (atomic/timing/...)
*/
void setMemoryMode(Enums::MemoryMode mode);
+ /** @} */
PCEventQueue pcEventQueue;
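Taken together, the predicates let a model choose its access path without ever touching the raw enum. A usage sketch (port and pkt are assumed members here, not part of this patch):

    if (system->isTimingMode()) {
        port.sendTimingReq(pkt);             // timed request, response arrives later
    } else if (system->isAtomicMode()) {
        Tick latency = port.sendAtomic(pkt); // completes immediately
    }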