# base address (including the PCI config space)
self.bridge = Bridge(delay='50ns', nack_delay='4ns',
ranges = [AddrRange(IO_address_space_base, Addr.max)])
- self.physmem = PhysicalMemory(range = AddrRange(mdesc.mem()))
+ self.physmem = SimpleMemory(range = AddrRange(mdesc.mem()))
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.physmem.port = self.membus.master
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
- physmem = PhysicalMemory(range = AddrRange(mdesc.mem()))
+ physmem = SimpleMemory(range = AddrRange(mdesc.mem()))
self = LinuxAlphaSystem(physmem = physmem)
if not mdesc:
# generic system
self.t1000 = T1000()
self.t1000.attachOnChipIO(self.membus)
self.t1000.attachIO(self.iobus)
- self.physmem = PhysicalMemory(range = AddrRange(Addr('1MB'), size = '64MB'), zero = True)
- self.physmem2 = PhysicalMemory(range = AddrRange(Addr('2GB'), size ='256MB'), zero = True)
+ self.physmem = SimpleMemory(range = AddrRange(Addr('1MB'), size = '64MB'),
+ zero = True)
+ self.physmem2 = SimpleMemory(range = AddrRange(Addr('2GB'), size ='256MB'),
+ zero = True)
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.physmem.port = self.membus.master
if bare_metal:
# EOT character on UART will end the simulation
self.realview.uart.end_on_eot = True
- self.physmem = PhysicalMemory(range = AddrRange(Addr(mdesc.mem())),
- zero = True)
+ self.physmem = SimpleMemory(range = AddrRange(Addr(mdesc.mem())),
+ zero = True)
else:
self.kernel = binary('vmlinux.arm.smp.fb.2.6.38.8')
self.machine_type = machine_type
boot_flags = 'earlyprintk console=ttyAMA0 lpj=19988480 norandmaps ' + \
'rw loglevel=8 mem=%s root=/dev/sda1' % mdesc.mem()
- self.physmem = PhysicalMemory(range = AddrRange(self.realview.mem_start_addr,
- size = mdesc.mem()))
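+        # conf_table_reported makes this memory visible to the kernel via the
+        # configuration table (ATAGs); see AbstractMemory.conf_table_reported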
+ self.physmem = SimpleMemory(range =
+ AddrRange(self.realview.mem_start_addr,
+ size = mdesc.mem()),
+ conf_table_reported = True)
self.realview.setupBootLoader(self.membus, self, binary)
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.flags_addr = self.realview.realview_io.pio_addr + 0x30
self.iobus = Bus(bus_id=0)
self.membus = MemBus(bus_id=1)
self.bridge = Bridge(delay='50ns', nack_delay='4ns')
- self.physmem = PhysicalMemory(range = AddrRange('1GB'))
+ self.physmem = SimpleMemory(range = AddrRange('1GB'))
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.physmem.port = self.membus.master
self.mem_mode = mem_mode
# Physical memory
- self.physmem = PhysicalMemory(range = AddrRange(mdesc.mem()))
+ self.physmem = SimpleMemory(range = AddrRange(mdesc.mem()))
# Platform
self.pc = Pc()
prototypes.insert(0, next)
# system simulated
-system = System(funcmem = PhysicalMemory(),
- physmem = PhysicalMemory(latency = "100ns"))
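+# funcmem is the tester's reference (shadow) copy of the data, so it is
+# excluded from the global address map (in_addr_map = False)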
+system = System(funcmem = SimpleMemory(in_addr_map = False),
+ physmem = SimpleMemory(latency = "100ns"))
def make_level(spec, prototypes, attach_obj, attach_port):
fanout = spec[0]
sys.exit(1)
#
-# Create the M5 system. Note that the PhysicalMemory Object isn't
+# Create the M5 system. Note that the Memory Object isn't
# actually used by the rubytester, but is included to support the
# M5 memory size == Ruby memory size checks
#
-system = System(physmem = PhysicalMemory())
+system = System(physmem = SimpleMemory())
#
# Create the ruby random tester
for i in xrange(options.num_cpus) ]
system = System(cpu = cpus,
- funcmem = PhysicalMemory(),
- physmem = PhysicalMemory())
+ funcmem = SimpleMemory(in_addr_map = False),
+ physmem = SimpleMemory())
if options.num_dmas > 0:
dmas = [ MemTest(atomic = False,
# create the desired simulated system
system = System(cpu = cpus,
- physmem = PhysicalMemory())
+ physmem = SimpleMemory())
Ruby.create_system(options, system)
wakeup_frequency = options.wakeup_freq)
#
-# Create the M5 system. Note that the PhysicalMemory Object isn't
+# Create the M5 system. Note that the Memory Object isn't
# actually used by the rubytester, but is included to support the
# M5 memory size == Ruby memory size checks
#
-system = System(tester = tester, physmem = PhysicalMemory())
+system = System(tester = tester, physmem = SimpleMemory())
Ruby.create_system(options, system)
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
- physmem = PhysicalMemory(range=AddrRange("512MB")),
+ physmem = SimpleMemory(range=AddrRange("512MB")),
membus = Bus(), mem_mode = test_mem_mode)
# Sanity check
cntrl_count += 1
- phys_mem_size = long(system.physmem.range.second) - \
- long(system.physmem.range.first) + 1
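+    # system.memories is assumed to enumerate every AbstractMemory in the
+    # system; summing their ranges gives the total physical memory that the
+    # Ruby directories must cover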
+ phys_mem_size = 0
+ for mem in system.memories.unproxy(system):
+ phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
mem_module_size = phys_mem_size / options.num_dirs
for i in xrange(options.num_dirs):
cntrl_count += 1
- phys_mem_size = long(system.physmem.range.second) - \
- long(system.physmem.range.first) + 1
+ phys_mem_size = 0
+ for mem in system.memories.unproxy(system):
+ phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
mem_module_size = phys_mem_size / options.num_dirs
for i in xrange(options.num_dirs):
cntrl_count += 1
- phys_mem_size = long(system.physmem.range.second) - \
- long(system.physmem.range.first) + 1
+ phys_mem_size = 0
+ for mem in system.memories.unproxy(system):
+ phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
mem_module_size = phys_mem_size / options.num_dirs
for i in xrange(options.num_dirs):
cntrl_count += 1
- phys_mem_size = long(system.physmem.range.second) - \
- long(system.physmem.range.first) + 1
+ phys_mem_size = 0
+ for mem in system.memories.unproxy(system):
+ phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
mem_module_size = phys_mem_size / options.num_dirs
for i in xrange(options.num_dirs):
cntrl_count += 1
- phys_mem_size = long(system.physmem.range.second) - \
- long(system.physmem.range.first) + 1
+ phys_mem_size = 0
+ for mem in system.memories.unproxy(system):
+ phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
mem_module_size = phys_mem_size / options.num_dirs
#
cntrl_count += 1
- phys_mem_size = long(system.physmem.range.second) - \
- long(system.physmem.range.first) + 1
+ phys_mem_size = 0
+ for mem in system.memories.unproxy(system):
+ phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
mem_module_size = phys_mem_size / options.num_dirs
for i in xrange(options.num_dirs):
total_mem_size.value += dir_cntrl.directory.size.value
dir_cntrl.directory.numa_high_bit = numa_bit
- physmem_size = long(system.physmem.range.second) - \
- long(system.physmem.range.first) + 1
- assert(total_mem_size.value == physmem_size)
+ phys_mem_size = 0
+ for mem in system.memories.unproxy(system):
+ phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
+ assert(total_mem_size.value == phys_mem_size)
ruby_profiler = RubyProfiler(ruby_system = ruby,
num_of_sequencers = len(cpu_sequencers))
# ----------------------
# Create a system, and add system wide objects
# ----------------------
-system = System(cpu = all_cpus, l1_ = all_l1s, l1bus_ = all_l1buses, physmem = PhysicalMemory(),
- membus = Bus(clock = busFrequency))
+system = System(cpu = all_cpus, l1_ = all_l1s, l1bus_ = all_l1buses,
+ physmem = SimpleMemory(), membus = Bus(clock = busFrequency))
system.toL2bus = Bus(clock = busFrequency)
system.l2 = L2(size = options.l2size, assoc = 8)
# ----------------------
# Create a system, and add system wide objects
# ----------------------
-system = System(cpu = cpus, physmem = PhysicalMemory(),
+system = System(cpu = cpus, physmem = SimpleMemory(),
membus = Bus(clock = busFrequency))
system.toL2bus = Bus(clock = busFrequency)
do {
if (IsK0Seg(va)) {
- if (va < (K0SegBase + pmem->size())) {
+ if (va < (K0SegBase + system->memSize())) {
DPRINTF(GDBAcc, "acc: Mapping is valid K0SEG <= "
"%#x < K0SEG + size\n", va);
return true;
class System;
class ThreadContext;
-class PhysicalMemory;
namespace AlphaISA {
# 0x0 Revision
midr_regval = Param.UInt32(0x350fc000, "MIDR value")
boot_loader = Param.String("", "File that contains the boot loader code if any")
- boot_loader_mem = Param.PhysicalMemory(NULL,
- "Memory object that boot loader is to be loaded into")
    gic_cpu_addr = Param.Addr(0, "Address of the GIC CPU interface")
flags_addr = Param.Addr(0, "Address of the flags register for MP booting")
machine_type = Param.ArmMachineType('RealView_PBX',
"Machine id from http://www.arm.linux.org.uk/developer/machines/")
atags_addr = Param.Addr(0x100, "Address where default atags structure should be written")
-
-
ac->pagesize(8192);
ac->rootdev(0);
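+    // getConfAddrRanges() presumably returns only the memories flagged with
+    // conf_table_reported, i.e. the DRAM that should be described to the
+    // kernel through the ATAG list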
+ AddrRangeList atagRanges = physmem.getConfAddrRanges();
+ if (atagRanges.size() != 1) {
+ fatal("Expected a single ATAG memory entry but got %d\n",
+ atagRanges.size());
+ }
AtagMem *am = new AtagMem;
- am->memSize(params()->physmem->size());
- am->memStart(params()->physmem->start());
+ am->memSize(atagRanges.begin()->size());
+ am->memStart(atagRanges.begin()->start);
AtagCmdline *ad = new AtagCmdline;
ad->cmdline(params()->boot_osflags);
class System;
class ThreadContext;
-class PhysicalMemory;
namespace ArmISA
{
ArmSystem::ArmSystem(Params *p)
: System(p), bootldr(NULL)
{
- if ((p->boot_loader == "") != (p->boot_loader_mem == NULL))
- fatal("If boot_loader is specifed, memory to load it must be also.\n");
-
if (p->boot_loader != "") {
bootldr = createObjectFile(p->boot_loader);
class System;
class ThreadContext;
-class PhysicalMemory;
namespace MipsISA
{
from m5.params import *
-from PhysicalMemory import *
+from SimpleMemory import SimpleMemory
from System import System
class SparcSystem(System):
_hypervisor_desc_base = 0x1f12080000
_partition_desc_base = 0x1f12000000
# ROM for OBP/Reset/Hypervisor
- rom = Param.PhysicalMemory(
- PhysicalMemory(range=AddrRange(_rom_base, size='8MB')),
+ rom = Param.SimpleMemory(
+ SimpleMemory(range=AddrRange(_rom_base, size='8MB')),
"Memory to hold the ROM data")
# nvram
- nvram = Param.PhysicalMemory(
- PhysicalMemory(range=AddrRange(_nvram_base, size='8kB')),
+ nvram = Param.SimpleMemory(
+ SimpleMemory(range=AddrRange(_nvram_base, size='8kB')),
"Memory to hold the nvram data")
# hypervisor description
- hypervisor_desc = Param.PhysicalMemory(
- PhysicalMemory(range=AddrRange(_hypervisor_desc_base, size='8kB')),
+ hypervisor_desc = Param.SimpleMemory(
+ SimpleMemory(range=AddrRange(_hypervisor_desc_base, size='8kB')),
"Memory to hold the hypervisor description")
# partition description
- partition_desc = Param.PhysicalMemory(
- PhysicalMemory(range=AddrRange(_partition_desc_base, size='8kB')),
+ partition_desc = Param.SimpleMemory(
+ SimpleMemory(range=AddrRange(_partition_desc_base, size='8kB')),
"Memory to hold the partition description")
reset_addr = Param.Addr(_rom_base, "Address to load ROM at")
class System;
class ThreadContext;
-class PhysicalMemory;
namespace SparcISA
{
BaseRemoteGDB::BaseRemoteGDB(System *_system, ThreadContext *c, size_t cacheSize)
: event(NULL), listener(NULL), number(-1), fd(-1),
active(false), attached(false),
- system(_system), pmem(_system->physmem), context(c),
+ system(_system), context(c),
gdbregs(cacheSize)
{
memset(gdbregs.regs, 0, gdbregs.bytes());
class System;
class ThreadContext;
-class PhysicalMemory;
class GDBListener;
bool attached;
System *system;
- PhysicalMemory *pmem;
ThreadContext *context;
protected:
System *getSystemPtr() { return actualTC->getSystemPtr(); }
- PhysicalMemory *getPhysMemPtr() { return actualTC->getPhysMemPtr(); }
-
TheISA::Kernel::Statistics *getKernelStats()
{ return actualTC->getKernelStats(); }
void setNextMicroPC(uint64_t val) { };
- /** Returns a pointer to physical memory. */
- PhysicalMemory *getPhysMemPtr()
- { assert(0); return 0; /*return cpu->physmem;*/ }
-
/** Returns a pointer to this thread's kernel statistics. */
TheISA::Kernel::Statistics *getKernelStats()
{ return thread->kernelStats; }
#include "debug/Activity.hh"
#include "debug/Fetch.hh"
#include "mem/packet.hh"
-#include "mem/request.hh"
#include "params/DerivO3CPU.hh"
#include "sim/byteswap.hh"
#include "sim/core.hh"
// Check that we're not going off into random memory
// If we have, just wait around for commit to squash something and put
// us on the right track
- if (!cpu->system->isMemory(mem_req->getPaddr())) {
+ if (!cpu->system->isMemAddr(mem_req->getPaddr())) {
warn("Address %#x is outside of physical memory, stopping fetch\n",
mem_req->getPaddr());
fetchStatus[tid] = NoGoodAddr;
class EndQuiesceEvent;
class MemoryController;
class MemObject;
-class PhysicalMemory;
class Process;
class Request;
System *getSystemPtr() { return cpu->system; }
- PhysicalMemory *getPhysMemPtr() { return cpu->physmem; }
-
TheISA::Kernel::Statistics *getKernelStats()
{ return thread->getKernelStats(); }
TheISA::TLB *itb;
TheISA::TLB *dtb;
System *system;
- PhysicalMemory *physmem;
FrontEnd *frontEnd;
}
}
- if (fastmem) {
- AddrRangeList pmAddrList = system->physmem->getAddrRanges();
- physMemAddr = *pmAddrList.begin();
- }
// Atomic doesn't do MT right now, so contextId == threadId
ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
if (req->isMmappedIpr())
dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
else {
- if (fastmem && pkt.getAddr() == physMemAddr)
- dcache_latency += system->physmem->doAtomicAccess(&pkt);
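+            // note: AbstractMemory::access() is untimed, so the fastmem
+            // path no longer contributes to dcache_latency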
+ if (fastmem && system->isMemAddr(pkt.getAddr()))
+ system->getPhysMem().access(&pkt);
else
dcache_latency += dcachePort.sendAtomic(&pkt);
}
dcache_latency +=
TheISA::handleIprWrite(thread->getTC(), &pkt);
} else {
- if (fastmem && pkt.getAddr() == physMemAddr)
- dcache_latency += system->physmem->doAtomicAccess(&pkt);
+ if (fastmem && system->isMemAddr(pkt.getAddr()))
+ system->getPhysMem().access(&pkt);
else
dcache_latency += dcachePort.sendAtomic(&pkt);
}
Packet::Broadcast);
ifetch_pkt.dataStatic(&inst);
- if (fastmem && ifetch_pkt.getAddr() == physMemAddr)
- icache_latency =
- system->physmem->doAtomicAccess(&ifetch_pkt);
+ if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
+ system->getPhysMem().access(&ifetch_pkt);
else
icache_latency = icachePort.sendAtomic(&ifetch_pkt);
bool dcache_access;
Tick dcache_latency;
- Range<Addr> physMemAddr;
-
protected:
/** Return a reference to the data port. */
alphaAccess->kernStart = system->getKernelStart();
alphaAccess->kernEnd = system->getKernelEnd();
alphaAccess->entryPoint = system->getKernelEntry();
- alphaAccess->mem_size = system->physmem->size();
+ alphaAccess->mem_size = system->memSize();
alphaAccess->cpuClock = cpu->frequency() / 1000000; // In MHz
Tsunami *tsunami = dynamic_cast<Tsunami *>(params()->platform);
if (!tsunami)
from Platform import Platform
from Terminal import Terminal
from Uart import Uart
-from PhysicalMemory import *
+from SimpleMemory import SimpleMemory
class AmbaDevice(BasicPioDevice):
type = 'AmbaDevice'
max_mem_size = Param.Addr('256MB', "Maximum amount of RAM supported by platform")
def setupBootLoader(self, mem_bus, cur_sys, loc):
- self.nvmem = PhysicalMemory(range = AddrRange(Addr('2GB'), size = '64MB'), zero = True)
+ self.nvmem = SimpleMemory(range = AddrRange(Addr('2GB'),
+ size = '64MB'),
+ zero = True)
self.nvmem.port = mem_bus.master
cur_sys.boot_loader = loc('boot.arm')
- cur_sys.boot_loader_mem = self.nvmem
# Reference for memory map and interrupt number
BAR0 = 0x1C1A0000, BAR0Size = '256B',
BAR1 = 0x1C1A0100, BAR1Size = '4096B',
BAR0LegacyIO = True, BAR1LegacyIO = True)
- vram = PhysicalMemory(range = AddrRange(0x18000000, size='32MB'), zero = True)
+ vram = SimpleMemory(range = AddrRange(0x18000000, size='32MB'),
+ zero = True)
rtc = PL031(pio_addr=0x1C170000, int_num=36)
l2x0_fake = IsaFake(pio_addr=0x2C100000, pio_size=0xfff)
mmc_fake = AmbaFake(pio_addr=0x1c050000)
def setupBootLoader(self, mem_bus, cur_sys, loc):
- self.nvmem = PhysicalMemory(range = AddrRange(0, size = '64MB'), zero = True)
+ self.nvmem = SimpleMemory(range = AddrRange(0, size = '64MB'),
+ zero = True)
self.nvmem.port = mem_bus.master
cur_sys.boot_loader = loc('boot_emm.arm')
- cur_sys.boot_loader_mem = self.nvmem
cur_sys.atags_addr = 0x80000100
# Attach I/O devices that are on chip and also set the appropriate
Dma_Transfer
} DmaState_t;
-class PhysicalMemory;
class IdeController;
/**
--- /dev/null
+# Copyright (c) 2012 ARM Limited
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
+# Copyright (c) 2005-2008 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+# Andreas Hansson
+
+from m5.params import *
+from MemObject import MemObject
+
+class AbstractMemory(MemObject):
+ type = 'AbstractMemory'
+ abstract = True
+ range = Param.AddrRange(AddrRange('128MB'), "Address range")
+ file = Param.String('', "Memory-mapped file")
+ null = Param.Bool(False, "Do not store data, always return zero")
+ zero = Param.Bool(False, "Initialize memory with zeros")
+
+ # All memories are passed to the global physical memory, and
+ # certain memories may be excluded from the global address map,
+ # e.g. by the testers that use shadow memories as a reference
+ in_addr_map = Param.Bool(True, "Memory part of the global address map")
+
+ # Should the bootloader include this memory when passing
+ # configuration information about the physical memory layout to
+ # the kernel, e.g. using ATAG or ACPI
+ conf_table_reported = Param.Bool(False, "Report to configuration table")
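+
+    # Illustrative sketch only (assumes a 'system' with a connected membus):
+    # a config script would typically use these flags as follows.
+    #
+    #   system.physmem = SimpleMemory(range = AddrRange('128MB'),
+    #                                 conf_table_reported = True)
+    #   system.physmem.port = system.membus.master
+    #   # a shadow/reference memory kept out of the global address map
+    #   system.funcmem = SimpleMemory(in_addr_map = False)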
+++ /dev/null
-# Copyright (c) 2005-2008 The Regents of The University of Michigan
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met: redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer;
-# redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution;
-# neither the name of the copyright holders nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Authors: Nathan Binkert
-
-from m5.params import *
-from m5.proxy import *
-from MemObject import *
-
-class PhysicalMemory(MemObject):
- type = 'PhysicalMemory'
- port = VectorSlavePort("the access port")
- range = Param.AddrRange(AddrRange('128MB'), "Device Address")
- file = Param.String('', "memory mapped file")
- latency = Param.Latency('30ns', "latency of an access")
- latency_var = Param.Latency('0ns', "access variablity")
- zero = Param.Bool(False, "zero initialize memory")
- null = Param.Bool(False, "do not store data, always return zero")
Source('se_translating_port_proxy.cc')
if env['TARGET_ISA'] != 'no':
- SimObject('PhysicalMemory.py')
+ SimObject('AbstractMemory.py')
+ SimObject('SimpleMemory.py')
+ Source('abstract_mem.cc')
+ Source('simple_mem.cc')
Source('page_table.cc')
Source('physical.cc')
--- /dev/null
+# Copyright (c) 2012 ARM Limited
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
+# Copyright (c) 2005-2008 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+# Andreas Hansson
+
+from m5.params import *
+from AbstractMemory import *
+
+class SimpleMemory(AbstractMemory):
+ type = 'SimpleMemory'
+ port = VectorSlavePort("Slave ports")
+ latency = Param.Latency('30ns', "Request to response latency")
+ latency_var = Param.Latency('0ns', "Request to response latency variance")
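+
+# Minimal usage sketch (illustrative only; 'system' and its membus are
+# assumed to exist). A SimpleMemory responds to each request after 'latency',
+# plus a random component of up to 'latency_var':
+#
+#   system.physmem = SimpleMemory(range = AddrRange('512MB'),
+#                                 latency = '30ns', latency_var = '5ns')
+#   system.physmem.port = system.membus.master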
--- /dev/null
+/*
+ * Copyright (c) 2010-2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Ali Saidi
+ * Andreas Hansson
+ */
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/user.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <zlib.h>
+
+#include <cerrno>
+#include <cstdio>
+#include <iostream>
+#include <string>
+
+#include "arch/registers.hh"
+#include "config/the_isa.hh"
+#include "debug/LLSC.hh"
+#include "debug/MemoryAccess.hh"
+#include "mem/abstract_mem.hh"
+#include "mem/packet_access.hh"
+
+using namespace std;
+
+AbstractMemory::AbstractMemory(const Params *p) :
+ MemObject(p), range(params()->range), pmemAddr(NULL),
+ confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map)
+{
+ if (size() % TheISA::PageBytes != 0)
+ panic("Memory Size not divisible by page size\n");
+
+ if (params()->null)
+ return;
+
+ if (params()->file == "") {
+ int map_flags = MAP_ANON | MAP_PRIVATE;
+ pmemAddr = (uint8_t *)mmap(NULL, size(),
+ PROT_READ | PROT_WRITE, map_flags, -1, 0);
+ } else {
+ int map_flags = MAP_PRIVATE;
+ int fd = open(params()->file.c_str(), O_RDONLY);
+ long _size = lseek(fd, 0, SEEK_END);
+ if (_size != range.size()) {
+ warn("Specified size %d does not match file %s %d\n", range.size(),
+ params()->file, _size);
+ range = RangeSize(range.start, _size);
+ }
+ lseek(fd, 0, SEEK_SET);
+ pmemAddr = (uint8_t *)mmap(NULL, roundUp(_size, sysconf(_SC_PAGESIZE)),
+ PROT_READ | PROT_WRITE, map_flags, fd, 0);
+ }
+
+ if (pmemAddr == (void *)MAP_FAILED) {
+ perror("mmap");
+ if (params()->file == "")
+ fatal("Could not mmap!\n");
+ else
+ fatal("Could not find file: %s\n", params()->file);
+ }
+
+ //If requested, initialize all the memory to 0
+ if (p->zero)
+ memset(pmemAddr, 0, size());
+}
+
+
+AbstractMemory::~AbstractMemory()
+{
+ if (pmemAddr)
+ munmap((char*)pmemAddr, size());
+}
+
+void
+AbstractMemory::regStats()
+{
+ using namespace Stats;
+
+ bytesRead
+ .name(name() + ".bytes_read")
+ .desc("Number of bytes read from this memory")
+ ;
+ bytesInstRead
+ .name(name() + ".bytes_inst_read")
+        .desc("Number of instruction bytes read from this memory")
+ ;
+ bytesWritten
+ .name(name() + ".bytes_written")
+ .desc("Number of bytes written to this memory")
+ ;
+ numReads
+ .name(name() + ".num_reads")
+ .desc("Number of read requests responded to by this memory")
+ ;
+ numWrites
+ .name(name() + ".num_writes")
+ .desc("Number of write requests responded to by this memory")
+ ;
+ numOther
+ .name(name() + ".num_other")
+ .desc("Number of other requests responded to by this memory")
+ ;
+ bwRead
+ .name(name() + ".bw_read")
+ .desc("Total read bandwidth from this memory (bytes/s)")
+ .precision(0)
+ .prereq(bytesRead)
+ ;
+ bwInstRead
+ .name(name() + ".bw_inst_read")
+ .desc("Instruction read bandwidth from this memory (bytes/s)")
+ .precision(0)
+ .prereq(bytesInstRead)
+ ;
+ bwWrite
+ .name(name() + ".bw_write")
+ .desc("Write bandwidth from this memory (bytes/s)")
+ .precision(0)
+ .prereq(bytesWritten)
+ ;
+ bwTotal
+ .name(name() + ".bw_total")
+ .desc("Total bandwidth to/from this memory (bytes/s)")
+ .precision(0)
+ .prereq(bwTotal)
+ ;
+ bwRead = bytesRead / simSeconds;
+ bwInstRead = bytesInstRead / simSeconds;
+ bwWrite = bytesWritten / simSeconds;
+ bwTotal = (bytesRead + bytesWritten) / simSeconds;
+}
+
+Range<Addr>
+AbstractMemory::getAddrRange()
+{
+ return range;
+}
+
+// Add load-locked to tracking list. Should only be called if the
+// operation is a load and the LLSC flag is set.
+void
+AbstractMemory::trackLoadLocked(PacketPtr pkt)
+{
+ Request *req = pkt->req;
+ Addr paddr = LockedAddr::mask(req->getPaddr());
+
+ // first we check if we already have a locked addr for this
+ // xc. Since each xc only gets one, we just update the
+ // existing record with the new address.
+ list<LockedAddr>::iterator i;
+
+ for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
+ if (i->matchesContext(req)) {
+ DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
+ req->contextId(), paddr);
+ i->addr = paddr;
+ return;
+ }
+ }
+
+ // no record for this xc: need to allocate a new one
+ DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
+ req->contextId(), paddr);
+ lockedAddrList.push_front(LockedAddr(req));
+}
+
+
+// Called on *writes* only... both regular stores and
+// store-conditional operations. Check for conventional stores which
+// conflict with locked addresses, and for success/failure of store
+// conditionals.
+bool
+AbstractMemory::checkLockedAddrList(PacketPtr pkt)
+{
+ Request *req = pkt->req;
+ Addr paddr = LockedAddr::mask(req->getPaddr());
+ bool isLLSC = pkt->isLLSC();
+
+ // Initialize return value. Non-conditional stores always
+ // succeed. Assume conditional stores will fail until proven
+ // otherwise.
+ bool success = !isLLSC;
+
+ // Iterate over list. Note that there could be multiple matching
+ // records, as more than one context could have done a load locked
+ // to this location.
+ list<LockedAddr>::iterator i = lockedAddrList.begin();
+
+ while (i != lockedAddrList.end()) {
+
+ if (i->addr == paddr) {
+ // we have a matching address
+
+ if (isLLSC && i->matchesContext(req)) {
+ // it's a store conditional, and as far as the memory
+ // system can tell, the requesting context's lock is
+ // still valid.
+ DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
+ req->contextId(), paddr);
+ success = true;
+ }
+
+ // Get rid of our record of this lock and advance to next
+ DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
+ i->contextId, paddr);
+ i = lockedAddrList.erase(i);
+ }
+ else {
+ // no match: advance to next record
+ ++i;
+ }
+ }
+
+ if (isLLSC) {
+ req->setExtraData(success ? 1 : 0);
+ }
+
+ return success;
+}
+
+
+#if TRACING_ON
+
+#define CASE(A, T) \
+ case sizeof(T): \
+ DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n", \
+ A, pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \
+ break
+
+
+#define TRACE_PACKET(A) \
+ do { \
+ switch (pkt->getSize()) { \
+ CASE(A, uint64_t); \
+ CASE(A, uint32_t); \
+ CASE(A, uint16_t); \
+ CASE(A, uint8_t); \
+ default: \
+ DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n", \
+ A, pkt->getSize(), pkt->getAddr()); \
+ DDUMP(MemoryAccess, pkt->getPtr<uint8_t>(), pkt->getSize());\
+ } \
+ } while (0)
+
+#else
+
+#define TRACE_PACKET(A)
+
+#endif
+
+void
+AbstractMemory::access(PacketPtr pkt)
+{
+ assert(pkt->getAddr() >= range.start &&
+ (pkt->getAddr() + pkt->getSize() - 1) <= range.end);
+
+ if (pkt->memInhibitAsserted()) {
+ DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
+ pkt->getAddr());
+ return;
+ }
+
+ uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start;
+
+ if (pkt->cmd == MemCmd::SwapReq) {
+ TheISA::IntReg overwrite_val;
+ bool overwrite_mem;
+ uint64_t condition_val64;
+ uint32_t condition_val32;
+
+ if (!pmemAddr)
+ panic("Swap only works if there is real memory (i.e. null=False)");
+ assert(sizeof(TheISA::IntReg) >= pkt->getSize());
+
+ overwrite_mem = true;
+ // keep a copy of our possible write value, and copy what is at the
+ // memory address into the packet
+ std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
+ std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+
+ if (pkt->req->isCondSwap()) {
+ if (pkt->getSize() == sizeof(uint64_t)) {
+ condition_val64 = pkt->req->getExtraData();
+ overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
+ sizeof(uint64_t));
+ } else if (pkt->getSize() == sizeof(uint32_t)) {
+ condition_val32 = (uint32_t)pkt->req->getExtraData();
+ overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
+ sizeof(uint32_t));
+ } else
+ panic("Invalid size for conditional read/write\n");
+ }
+
+ if (overwrite_mem)
+ std::memcpy(hostAddr, &overwrite_val, pkt->getSize());
+
+ assert(!pkt->req->isInstFetch());
+ TRACE_PACKET("Read/Write");
+ numOther++;
+ } else if (pkt->isRead()) {
+ assert(!pkt->isWrite());
+ if (pkt->isLLSC()) {
+ trackLoadLocked(pkt);
+ }
+ if (pmemAddr)
+ memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+ TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
+ numReads++;
+ bytesRead += pkt->getSize();
+ if (pkt->req->isInstFetch())
+ bytesInstRead += pkt->getSize();
+ } else if (pkt->isWrite()) {
+ if (writeOK(pkt)) {
+ if (pmemAddr)
+ memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
+ assert(!pkt->req->isInstFetch());
+ TRACE_PACKET("Write");
+ numWrites++;
+ bytesWritten += pkt->getSize();
+ }
+ } else if (pkt->isInvalidate()) {
+ // no need to do anything
+ } else {
+ panic("unimplemented");
+ }
+
+ if (pkt->needsResponse()) {
+ pkt->makeResponse();
+ }
+}
+
+void
+AbstractMemory::functionalAccess(PacketPtr pkt)
+{
+ assert(pkt->getAddr() >= range.start &&
+ (pkt->getAddr() + pkt->getSize() - 1) <= range.end);
+
+ uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start;
+
+ if (pkt->isRead()) {
+ if (pmemAddr)
+ memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+ TRACE_PACKET("Read");
+ pkt->makeResponse();
+ } else if (pkt->isWrite()) {
+ if (pmemAddr)
+ memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
+ TRACE_PACKET("Write");
+ pkt->makeResponse();
+ } else if (pkt->isPrint()) {
+ Packet::PrintReqState *prs =
+ dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
+ // Need to call printLabels() explicitly since we're not going
+ // through printObj().
+ prs->printLabels();
+ // Right now we just print the single byte at the specified address.
+ ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
+ } else {
+ panic("AbstractMemory: unimplemented functional command %s",
+ pkt->cmdString());
+ }
+}
+
+void
+AbstractMemory::serialize(ostream &os)
+{
+ if (!pmemAddr)
+ return;
+
+ gzFile compressedMem;
+ string filename = name() + ".physmem";
+ long _size = range.size();
+
+ SERIALIZE_SCALAR(filename);
+ SERIALIZE_SCALAR(_size);
+
+ // write memory file
+ string thefile = Checkpoint::dir() + "/" + filename.c_str();
+ int fd = creat(thefile.c_str(), 0664);
+ if (fd < 0) {
+ perror("creat");
+ fatal("Can't open physical memory checkpoint file '%s'\n", filename);
+ }
+
+ compressedMem = gzdopen(fd, "wb");
+ if (compressedMem == NULL)
+ fatal("Insufficient memory to allocate compression state for %s\n",
+ filename);
+
+ if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) {
+ fatal("Write failed on physical memory checkpoint file '%s'\n",
+ filename);
+ }
+
+ if (gzclose(compressedMem))
+ fatal("Close failed on physical memory checkpoint file '%s'\n",
+ filename);
+
+ list<LockedAddr>::iterator i = lockedAddrList.begin();
+
+ vector<Addr> lal_addr;
+ vector<int> lal_cid;
+ while (i != lockedAddrList.end()) {
+ lal_addr.push_back(i->addr);
+ lal_cid.push_back(i->contextId);
+ i++;
+ }
+ arrayParamOut(os, "lal_addr", lal_addr);
+ arrayParamOut(os, "lal_cid", lal_cid);
+}
+
+void
+AbstractMemory::unserialize(Checkpoint *cp, const string &section)
+{
+ if (!pmemAddr)
+ return;
+
+ gzFile compressedMem;
+ long *tempPage;
+ long *pmem_current;
+ uint64_t curSize;
+ uint32_t bytesRead;
+ const uint32_t chunkSize = 16384;
+
+ string filename;
+
+ UNSERIALIZE_SCALAR(filename);
+
+ filename = cp->cptDir + "/" + filename;
+
+ // mmap memoryfile
+ int fd = open(filename.c_str(), O_RDONLY);
+ if (fd < 0) {
+ perror("open");
+ fatal("Can't open physical memory checkpoint file '%s'", filename);
+ }
+
+ compressedMem = gzdopen(fd, "rb");
+ if (compressedMem == NULL)
+ fatal("Insufficient memory to allocate compression state for %s\n",
+ filename);
+
+ // unmap file that was mmapped in the constructor
+ // This is done here to make sure that gzip and open don't muck with our
+ // nice large space of memory before we reallocate it
+ munmap((char*)pmemAddr, size());
+
+ long _size;
+ UNSERIALIZE_SCALAR(_size);
+ if (_size > params()->range.size())
+ fatal("Memory size has changed! size %lld, param size %lld\n",
+ _size, params()->range.size());
+
+ pmemAddr = (uint8_t *)mmap(NULL, size(),
+ PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
+
+ if (pmemAddr == (void *)MAP_FAILED) {
+ perror("mmap");
+ fatal("Could not mmap physical memory!\n");
+ }
+
+ curSize = 0;
+ tempPage = (long*)malloc(chunkSize);
+ if (tempPage == NULL)
+ fatal("Unable to malloc memory to read file %s\n", filename);
+
+ /* Only copy bytes that are non-zero, so we don't give the VM system hell */
+ while (curSize < size()) {
+ bytesRead = gzread(compressedMem, tempPage, chunkSize);
+ if (bytesRead == 0)
+ break;
+
+ assert(bytesRead % sizeof(long) == 0);
+
+ for (uint32_t x = 0; x < bytesRead / sizeof(long); x++)
+ {
+ if (*(tempPage+x) != 0) {
+ pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
+ *pmem_current = *(tempPage+x);
+ }
+ }
+ curSize += bytesRead;
+ }
+
+ free(tempPage);
+
+ if (gzclose(compressedMem))
+ fatal("Close failed on physical memory checkpoint file '%s'\n",
+ filename);
+
+ vector<Addr> lal_addr;
+ vector<int> lal_cid;
+ arrayParamIn(cp, section, "lal_addr", lal_addr);
+ arrayParamIn(cp, section, "lal_cid", lal_cid);
+ for(int i = 0; i < lal_addr.size(); i++)
+ lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Andreas Hansson
+ */
+
+/**
+ * @file
+ * AbstractMemory declaration
+ */
+
+#ifndef __ABSTRACT_MEMORY_HH__
+#define __ABSTRACT_MEMORY_HH__
+
+#include "mem/mem_object.hh"
+#include "params/AbstractMemory.hh"
+#include "sim/stats.hh"
+
+/**
+ * An abstract memory represents a contiguous block of physical
+ * memory, with an associated address range, and also provides basic
+ * functionality for reading and writing this memory without any
+ * timing information. It is a MemObject since any subclass must have
+ * at least one slave port.
+ */
+class AbstractMemory : public MemObject
+{
+ protected:
+
+ // Address range of this memory
+ Range<Addr> range;
+
+ // Pointer to host memory used to implement this memory
+ uint8_t* pmemAddr;
+
+ // Enable specific memories to be reported to the configuration table
+ bool confTableReported;
+
+ // Should the memory appear in the global address map
+ bool inAddrMap;
+
+ class LockedAddr {
+
+ public:
+ // on alpha, minimum LL/SC granularity is 16 bytes, so lower
+        // bits need to be masked off.
+ static const Addr Addr_Mask = 0xf;
+
+ static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
+
+ Addr addr; // locked address
+ int contextId; // locking hw context
+
+ // check for matching execution context
+ bool matchesContext(Request *req)
+ {
+ return (contextId == req->contextId());
+ }
+
+ LockedAddr(Request *req) : addr(mask(req->getPaddr())),
+ contextId(req->contextId())
+ {
+ }
+ // constructor for unserialization use
+ LockedAddr(Addr _addr, int _cid) : addr(_addr), contextId(_cid)
+ {
+ }
+ };
+
+ std::list<LockedAddr> lockedAddrList;
+
+ // helper function for checkLockedAddrs(): we really want to
+ // inline a quick check for an empty locked addr list (hopefully
+ // the common case), and do the full list search (if necessary) in
+ // this out-of-line function
+ bool checkLockedAddrList(PacketPtr pkt);
+
+ // Record the address of a load-locked operation so that we can
+ // clear the execution context's lock flag if a matching store is
+ // performed
+ void trackLoadLocked(PacketPtr pkt);
+
+ // Compare a store address with any locked addresses so we can
+ // clear the lock flag appropriately. Return value set to 'false'
+ // if store operation should be suppressed (because it was a
+ // conditional store and the address was no longer locked by the
+ // requesting execution context), 'true' otherwise. Note that
+ // this method must be called on *all* stores since even
+ // non-conditional stores must clear any matching lock addresses.
+ bool writeOK(PacketPtr pkt) {
+ Request *req = pkt->req;
+ if (lockedAddrList.empty()) {
+ // no locked addrs: nothing to check, store_conditional fails
+ bool isLLSC = pkt->isLLSC();
+ if (isLLSC) {
+ req->setExtraData(0);
+ }
+ return !isLLSC; // only do write if not an sc
+ } else {
+ // iterate over list...
+ return checkLockedAddrList(pkt);
+ }
+ }
+
+ /** Number of total bytes read from this memory */
+ Stats::Scalar bytesRead;
+ /** Number of instruction bytes read from this memory */
+ Stats::Scalar bytesInstRead;
+ /** Number of bytes written to this memory */
+ Stats::Scalar bytesWritten;
+ /** Number of read requests */
+ Stats::Scalar numReads;
+ /** Number of write requests */
+ Stats::Scalar numWrites;
+ /** Number of other requests */
+ Stats::Scalar numOther;
+ /** Read bandwidth from this memory */
+ Stats::Formula bwRead;
+    /** Instruction read bandwidth from this memory */
+ Stats::Formula bwInstRead;
+ /** Write bandwidth from this memory */
+ Stats::Formula bwWrite;
+ /** Total bandwidth from this memory */
+ Stats::Formula bwTotal;
+
+ private:
+
+ // Prevent copying
+ AbstractMemory(const AbstractMemory&);
+
+ // Prevent assignment
+ AbstractMemory& operator=(const AbstractMemory&);
+
+ public:
+
+ typedef AbstractMemoryParams Params;
+
+ AbstractMemory(const Params* p);
+ virtual ~AbstractMemory();
+
+ const Params *
+ params() const
+ {
+ return dynamic_cast<const Params *>(_params);
+ }
+
+ /**
+ * Get the address range
+ *
+     * @return a single contiguous address range
+ */
+ Range<Addr> getAddrRange();
+
+ /**
+ * Get the memory size.
+ *
+ * @return the size of the memory
+ */
+ uint64_t size() { return range.size(); }
+
+ /**
+ * Get the start address.
+ *
+ * @return the start address of the memory
+ */
+ Addr start() { return range.start; }
+
+ /**
+ * Should this memory be passed to the kernel and part of the OS
+ * physical memory layout.
+ *
+ * @return if this memory is reported
+ */
+ bool isConfReported() const { return confTableReported; }
+
+ /**
+ * Some memories are used as shadow memories or should for other
+ * reasons not be part of the global address map.
+ *
+ * @return if this memory is part of the address map
+ */
+ bool isInAddrMap() const { return inAddrMap; }
+
+ /**
+ * Perform an untimed memory access and update all the state
+ * (e.g. locked addresses) and statistics accordingly. The packet
+ * is turned into a response if required.
+ *
+ * @param pkt Packet performing the access
+ */
+ void access(PacketPtr pkt);
+
+ /**
+ * Perform an untimed memory read or write without changing
+ * anything but the memory itself. No stats are affected by this
+ * access. In addition to normal accesses this also facilitates
+ * print requests.
+ *
+ * @param pkt Packet performing the access
+ */
+ void functionalAccess(PacketPtr pkt);
+
+ /**
+ * Register Statistics
+ */
+ virtual void regStats();
+
+ virtual void serialize(std::ostream &os);
+    virtual void unserialize(Checkpoint *cp, const std::string &section);
+
+};
+
+#endif //__ABSTRACT_MEMORY_HH__
// to forward the snoop up the hierarchy after the current
// transaction completes.
- // Actual target device (typ. PhysicalMemory) will delete the
+ // Actual target device (typ. a memory) will delete the
// packet on reception, so we need to save a copy here.
PacketPtr cp_pkt = new Packet(pkt, true);
targets->add(cp_pkt, curTick(), _order, Target::FromSnoop,
/*
- * Copyright (c) 2010-2011 ARM Limited
+ * Copyright (c) 2012 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
- * Copyright (c) 2001-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Authors: Ron Dreslinski
- * Ali Saidi
+ * Authors: Andreas Hansson
*/
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/user.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <zlib.h>
-
-#include <cerrno>
-#include <cstdio>
-#include <iostream>
-#include <string>
-
-#include "arch/isa_traits.hh"
-#include "arch/registers.hh"
-#include "base/intmath.hh"
-#include "base/misc.hh"
-#include "base/random.hh"
-#include "base/types.hh"
-#include "config/the_isa.hh"
-#include "debug/LLSC.hh"
-#include "debug/MemoryAccess.hh"
-#include "mem/packet_access.hh"
+#include "debug/BusAddrRanges.hh"
#include "mem/physical.hh"
-#include "sim/eventq.hh"
using namespace std;
-using namespace TheISA;
-
-PhysicalMemory::PhysicalMemory(const Params *p)
- : MemObject(p), pmemAddr(NULL), lat(p->latency), lat_var(p->latency_var),
- _size(params()->range.size()), _start(params()->range.start)
-{
- if (size() % TheISA::PageBytes != 0)
- panic("Memory Size not divisible by page size\n");
-
- // create the appropriate number of ports
- for (int i = 0; i < p->port_port_connection_count; ++i) {
- ports.push_back(new MemoryPort(csprintf("%s-port%d", name(), i),
- this));
- }
-
- if (params()->null)
- return;
-
-
- if (params()->file == "") {
- int map_flags = MAP_ANON | MAP_PRIVATE;
- pmemAddr = (uint8_t *)mmap(NULL, size(),
- PROT_READ | PROT_WRITE, map_flags, -1, 0);
- } else {
- int map_flags = MAP_PRIVATE;
- int fd = open(params()->file.c_str(), O_RDONLY);
- _size = lseek(fd, 0, SEEK_END);
- lseek(fd, 0, SEEK_SET);
- pmemAddr = (uint8_t *)mmap(NULL, roundUp(size(), sysconf(_SC_PAGESIZE)),
- PROT_READ | PROT_WRITE, map_flags, fd, 0);
- }
-
- if (pmemAddr == (void *)MAP_FAILED) {
- perror("mmap");
- if (params()->file == "")
- fatal("Could not mmap!\n");
- else
- fatal("Could not find file: %s\n", params()->file);
- }
-
- //If requested, initialize all the memory to 0
- if (p->zero)
- memset(pmemAddr, 0, size());
-}
-
-void
-PhysicalMemory::init()
-{
- for (PortIterator p = ports.begin(); p != ports.end(); ++p) {
- if (!(*p)->isConnected()) {
- fatal("PhysicalMemory port %s is unconnected!\n", (*p)->name());
- } else {
- (*p)->sendRangeChange();
- }
- }
-}
-
-PhysicalMemory::~PhysicalMemory()
-{
- if (pmemAddr)
- munmap((char*)pmemAddr, size());
-}
-
-void
-PhysicalMemory::regStats()
-{
- using namespace Stats;
-
- bytesRead
- .name(name() + ".bytes_read")
- .desc("Number of bytes read from this memory")
- ;
- bytesInstRead
- .name(name() + ".bytes_inst_read")
- .desc("Number of instructions bytes read from this memory")
- ;
- bytesWritten
- .name(name() + ".bytes_written")
- .desc("Number of bytes written to this memory")
- ;
- numReads
- .name(name() + ".num_reads")
- .desc("Number of read requests responded to by this memory")
- ;
- numWrites
- .name(name() + ".num_writes")
- .desc("Number of write requests responded to by this memory")
- ;
- numOther
- .name(name() + ".num_other")
- .desc("Number of other requests responded to by this memory")
- ;
- bwRead
- .name(name() + ".bw_read")
- .desc("Total read bandwidth from this memory (bytes/s)")
- .precision(0)
- .prereq(bytesRead)
- ;
- bwInstRead
- .name(name() + ".bw_inst_read")
- .desc("Instruction read bandwidth from this memory (bytes/s)")
- .precision(0)
- .prereq(bytesInstRead)
- ;
- bwWrite
- .name(name() + ".bw_write")
- .desc("Write bandwidth from this memory (bytes/s)")
- .precision(0)
- .prereq(bytesWritten)
- ;
- bwTotal
- .name(name() + ".bw_total")
- .desc("Total bandwidth to/from this memory (bytes/s)")
- .precision(0)
- .prereq(bwTotal)
- ;
- bwRead = bytesRead / simSeconds;
- bwInstRead = bytesInstRead / simSeconds;
- bwWrite = bytesWritten / simSeconds;
- bwTotal = (bytesRead + bytesWritten) / simSeconds;
-}
-unsigned
-PhysicalMemory::deviceBlockSize() const
-{
- //Can accept anysize request
- return 0;
-}
-
-Tick
-PhysicalMemory::calculateLatency(PacketPtr pkt)
-{
- Tick latency = lat;
- if (lat_var != 0)
- latency += random_mt.random<Tick>(0, lat_var);
- return latency;
-}
-
-
-
-// Add load-locked to tracking list. Should only be called if the
-// operation is a load and the LLSC flag is set.
-void
-PhysicalMemory::trackLoadLocked(PacketPtr pkt)
+PhysicalMemory::PhysicalMemory(const vector<AbstractMemory*>& _memories) :
+ size(0)
{
- Request *req = pkt->req;
- Addr paddr = LockedAddr::mask(req->getPaddr());
+ for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
+ m != _memories.end(); ++m) {
+ // only add the memory if it is part of the global address map
+ if ((*m)->isInAddrMap()) {
+ memories.push_back(*m);
- // first we check if we already have a locked addr for this
- // xc. Since each xc only gets one, we just update the
- // existing record with the new address.
- list<LockedAddr>::iterator i;
+ // calculate the total size once and for all
+ size += (*m)->size();
- for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
- if (i->matchesContext(req)) {
- DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
- req->contextId(), paddr);
- i->addr = paddr;
- return;
+ // add the range to our interval tree and make sure it does not
+ // intersect an existing range
+ if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
+ fatal("Memory address range for %s is overlapping\n",
+ (*m)->name());
}
+        else
+            DPRINTF(BusAddrRanges,
+                    "Skipping memory %s that is not in global address map\n",
+                    (*m)->name());
}
-
- // no record for this xc: need to allocate a new one
- DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
- req->contextId(), paddr);
- lockedAddrList.push_front(LockedAddr(req));
+ rangeCache.invalidate();
}
-
-// Called on *writes* only... both regular stores and
-// store-conditional operations. Check for conventional stores which
-// conflict with locked addresses, and for success/failure of store
-// conditionals.
bool
-PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
-{
- Request *req = pkt->req;
- Addr paddr = LockedAddr::mask(req->getPaddr());
- bool isLLSC = pkt->isLLSC();
-
- // Initialize return value. Non-conditional stores always
- // succeed. Assume conditional stores will fail until proven
- // otherwise.
- bool success = !isLLSC;
-
- // Iterate over list. Note that there could be multiple matching
- // records, as more than one context could have done a load locked
- // to this location.
- list<LockedAddr>::iterator i = lockedAddrList.begin();
-
- while (i != lockedAddrList.end()) {
-
- if (i->addr == paddr) {
- // we have a matching address
-
- if (isLLSC && i->matchesContext(req)) {
- // it's a store conditional, and as far as the memory
- // system can tell, the requesting context's lock is
- // still valid.
- DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
- req->contextId(), paddr);
- success = true;
- }
-
- // Get rid of our record of this lock and advance to next
- DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
- i->contextId, paddr);
- i = lockedAddrList.erase(i);
- }
- else {
- // no match: advance to next record
- ++i;
+PhysicalMemory::isMemAddr(Addr addr) const
+{
+ // see if the address is within the last matched range
+ if (addr != rangeCache) {
+ // lookup in the interval tree
+ range_map<Addr, AbstractMemory*>::const_iterator r =
+ addrMap.find(addr);
+ if (r == addrMap.end()) {
+ // not in the cache, and not in the tree
+ return false;
}
+ // the range is in the tree, update the cache
+ rangeCache = r->first;
}
- if (isLLSC) {
- req->setExtraData(success ? 1 : 0);
- }
-
- return success;
-}
-
-
-#if TRACING_ON
-
-#define CASE(A, T) \
- case sizeof(T): \
- DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n", \
- A, pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \
- break
-
-
-#define TRACE_PACKET(A) \
- do { \
- switch (pkt->getSize()) { \
- CASE(A, uint64_t); \
- CASE(A, uint32_t); \
- CASE(A, uint16_t); \
- CASE(A, uint8_t); \
- default: \
- DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n", \
- A, pkt->getSize(), pkt->getAddr()); \
- DDUMP(MemoryAccess, pkt->getPtr<uint8_t>(), pkt->getSize());\
- } \
- } while (0)
-
-#else
-
-#define TRACE_PACKET(A)
-
-#endif
-
-Tick
-PhysicalMemory::doAtomicAccess(PacketPtr pkt)
-{
- assert(pkt->getAddr() >= start() &&
- pkt->getAddr() + pkt->getSize() <= start() + size());
-
- if (pkt->memInhibitAsserted()) {
- DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
- pkt->getAddr());
- return 0;
- }
-
- uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();
-
- if (pkt->cmd == MemCmd::SwapReq) {
- IntReg overwrite_val;
- bool overwrite_mem;
- uint64_t condition_val64;
- uint32_t condition_val32;
-
- if (!pmemAddr)
- panic("Swap only works if there is real memory (i.e. null=False)");
- assert(sizeof(IntReg) >= pkt->getSize());
-
- overwrite_mem = true;
- // keep a copy of our possible write value, and copy what is at the
- // memory address into the packet
- std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
- std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+ assert(addrMap.find(addr) != addrMap.end());
- if (pkt->req->isCondSwap()) {
- if (pkt->getSize() == sizeof(uint64_t)) {
- condition_val64 = pkt->req->getExtraData();
- overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
- sizeof(uint64_t));
- } else if (pkt->getSize() == sizeof(uint32_t)) {
- condition_val32 = (uint32_t)pkt->req->getExtraData();
- overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
- sizeof(uint32_t));
- } else
- panic("Invalid size for conditional read/write\n");
- }
-
- if (overwrite_mem)
- std::memcpy(hostAddr, &overwrite_val, pkt->getSize());
-
- assert(!pkt->req->isInstFetch());
- TRACE_PACKET("Read/Write");
- numOther++;
- } else if (pkt->isRead()) {
- assert(!pkt->isWrite());
- if (pkt->isLLSC()) {
- trackLoadLocked(pkt);
- }
- if (pmemAddr)
- memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
- TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
- numReads++;
- bytesRead += pkt->getSize();
- if (pkt->req->isInstFetch())
- bytesInstRead += pkt->getSize();
- } else if (pkt->isWrite()) {
- if (writeOK(pkt)) {
- if (pmemAddr)
- memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
- assert(!pkt->req->isInstFetch());
- TRACE_PACKET("Write");
- numWrites++;
- bytesWritten += pkt->getSize();
- }
- } else if (pkt->isInvalidate()) {
- //upgrade or invalidate
- if (pkt->needsResponse()) {
- pkt->makeAtomicResponse();
- }
- } else {
- panic("unimplemented");
- }
-
- if (pkt->needsResponse()) {
- pkt->makeAtomicResponse();
- }
- return calculateLatency(pkt);
-}
-
-
-void
-PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
-{
- assert(pkt->getAddr() >= start() &&
- pkt->getAddr() + pkt->getSize() <= start() + size());
-
-
- uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();
-
- if (pkt->isRead()) {
- if (pmemAddr)
- memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
- TRACE_PACKET("Read");
- pkt->makeAtomicResponse();
- } else if (pkt->isWrite()) {
- if (pmemAddr)
- memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
- TRACE_PACKET("Write");
- pkt->makeAtomicResponse();
- } else if (pkt->isPrint()) {
- Packet::PrintReqState *prs =
- dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
- // Need to call printLabels() explicitly since we're not going
- // through printObj().
- prs->printLabels();
- // Right now we just print the single byte at the specified address.
- ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
- } else {
- panic("PhysicalMemory: unimplemented functional command %s",
- pkt->cmdString());
- }
-}
-
-
-SlavePort &
-PhysicalMemory::getSlavePort(const std::string &if_name, int idx)
-{
- if (if_name != "port") {
- return MemObject::getSlavePort(if_name, idx);
- } else {
- if (idx >= static_cast<int>(ports.size())) {
- fatal("PhysicalMemory::getSlavePort: unknown index %d\n", idx);
- }
-
- return *ports[idx];
- }
-}
-
-PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
- PhysicalMemory *_memory)
- : SimpleTimingPort(_name, _memory), memory(_memory)
-{ }
-
-AddrRangeList
-PhysicalMemory::MemoryPort::getAddrRanges()
-{
- return memory->getAddrRanges();
+ // either matched the cache or found in the tree
+ return true;
}
AddrRangeList
-PhysicalMemory::getAddrRanges()
+PhysicalMemory::getConfAddrRanges() const
{
+ // this could be done once in the constructor, but since it is unlikely to
+ // be called more than once the iteration should not be a problem
AddrRangeList ranges;
- ranges.push_back(RangeSize(start(), size()));
- return ranges;
-}
-
-unsigned
-PhysicalMemory::MemoryPort::deviceBlockSize() const
-{
- return memory->deviceBlockSize();
-}
-
-Tick
-PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
-{
- return memory->doAtomicAccess(pkt);
-}
-
-void
-PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
-{
- pkt->pushLabel(memory->name());
-
- if (!queue.checkFunctional(pkt)) {
- // Default implementation of SimpleTimingPort::recvFunctional()
- // calls recvAtomic() and throws away the latency; we can save a
- // little here by just not calculating the latency.
- memory->doFunctionalAccess(pkt);
- }
-
- pkt->popLabel();
-}
-
-unsigned int
-PhysicalMemory::drain(Event *de)
-{
- int count = 0;
- for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
- count += (*pi)->drain(de);
+ for (vector<AbstractMemory*>::const_iterator m = memories.begin();
+ m != memories.end(); ++m) {
+ if ((*m)->isConfReported()) {
+ ranges.push_back((*m)->getAddrRange());
+ }
}
- if (count)
- changeState(Draining);
- else
- changeState(Drained);
- return count;
+ return ranges;
}
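As a usage sketch (assuming the conf_table_reported parameter on the memory objects, as passed to SimpleMemory elsewhere in this series): getConfAddrRanges() only picks up memories that report themselves to the configuration table, so a memory can sit in the global address map without appearing in the table. Object names below are invented for illustration.

    # reported to the configuration table (isConfReported() == true)
    system.physmem = SimpleMemory(range = AddrRange('512MB'),
                                  conf_table_reported = True)
    # mapped, but deliberately kept out of the configuration table
    system.scratch = SimpleMemory(range = AddrRange(Addr('2GB'), size = '64MB'),
                                  conf_table_reported = False)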
void
-PhysicalMemory::serialize(ostream &os)
+PhysicalMemory::access(PacketPtr pkt)
{
- if (!pmemAddr)
- return;
-
- gzFile compressedMem;
- string filename = name() + ".physmem";
-
- SERIALIZE_SCALAR(filename);
- SERIALIZE_SCALAR(_size);
-
- // write memory file
- string thefile = Checkpoint::dir() + "/" + filename.c_str();
- int fd = creat(thefile.c_str(), 0664);
- if (fd < 0) {
- perror("creat");
- fatal("Can't open physical memory checkpoint file '%s'\n", filename);
- }
-
- compressedMem = gzdopen(fd, "wb");
- if (compressedMem == NULL)
- fatal("Insufficient memory to allocate compression state for %s\n",
- filename);
-
- if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) {
- fatal("Write failed on physical memory checkpoint file '%s'\n",
- filename);
- }
-
- if (gzclose(compressedMem))
- fatal("Close failed on physical memory checkpoint file '%s'\n",
- filename);
-
- list<LockedAddr>::iterator i = lockedAddrList.begin();
-
- vector<Addr> lal_addr;
- vector<int> lal_cid;
- while (i != lockedAddrList.end()) {
- lal_addr.push_back(i->addr);
- lal_cid.push_back(i->contextId);
- i++;
- }
- arrayParamOut(os, "lal_addr", lal_addr);
- arrayParamOut(os, "lal_cid", lal_cid);
+ assert(pkt->isRequest());
+ Addr addr = pkt->getAddr();
+ range_map<Addr, AbstractMemory*>::const_iterator m = addrMap.find(addr);
+ assert(m != addrMap.end());
+ m->second->access(pkt);
}
void
-PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
-{
- if (!pmemAddr)
- return;
-
- gzFile compressedMem;
- long *tempPage;
- long *pmem_current;
- uint64_t curSize;
- uint32_t bytesRead;
- const uint32_t chunkSize = 16384;
-
- string filename;
-
- UNSERIALIZE_SCALAR(filename);
-
- filename = cp->cptDir + "/" + filename;
-
- // mmap memoryfile
- int fd = open(filename.c_str(), O_RDONLY);
- if (fd < 0) {
- perror("open");
- fatal("Can't open physical memory checkpoint file '%s'", filename);
- }
-
- compressedMem = gzdopen(fd, "rb");
- if (compressedMem == NULL)
- fatal("Insufficient memory to allocate compression state for %s\n",
- filename);
-
- // unmap file that was mmapped in the constructor
- // This is done here to make sure that gzip and open don't muck with our
- // nice large space of memory before we reallocate it
- munmap((char*)pmemAddr, size());
-
- UNSERIALIZE_SCALAR(_size);
- if (size() > params()->range.size())
- fatal("Memory size has changed! size %lld, param size %lld\n",
- size(), params()->range.size());
-
- pmemAddr = (uint8_t *)mmap(NULL, size(),
- PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
-
- if (pmemAddr == (void *)MAP_FAILED) {
- perror("mmap");
- fatal("Could not mmap physical memory!\n");
- }
-
- curSize = 0;
- tempPage = (long*)malloc(chunkSize);
- if (tempPage == NULL)
- fatal("Unable to malloc memory to read file %s\n", filename);
-
- /* Only copy bytes that are non-zero, so we don't give the VM system hell */
- while (curSize < size()) {
- bytesRead = gzread(compressedMem, tempPage, chunkSize);
- if (bytesRead == 0)
- break;
-
- assert(bytesRead % sizeof(long) == 0);
-
- for (uint32_t x = 0; x < bytesRead / sizeof(long); x++)
- {
- if (*(tempPage+x) != 0) {
- pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
- *pmem_current = *(tempPage+x);
- }
- }
- curSize += bytesRead;
- }
-
- free(tempPage);
-
- if (gzclose(compressedMem))
- fatal("Close failed on physical memory checkpoint file '%s'\n",
- filename);
-
- vector<Addr> lal_addr;
- vector<int> lal_cid;
- arrayParamIn(cp, section, "lal_addr", lal_addr);
- arrayParamIn(cp, section, "lal_cid", lal_cid);
- for(int i = 0; i < lal_addr.size(); i++)
- lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
-}
-
-PhysicalMemory *
-PhysicalMemoryParams::create()
+PhysicalMemory::functionalAccess(PacketPtr pkt)
{
- return new PhysicalMemory(this);
+ assert(pkt->isRequest());
+ Addr addr = pkt->getAddr();
+ range_map<Addr, AbstractMemory*>::const_iterator m = addrMap.find(addr);
+ assert(m != addrMap.end());
+ m->second->functionalAccess(pkt);
}
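A hedged configuration sketch of how the global address map above gets populated: System gathers every memory child through the Self.all proxy and hands the vector to this PhysicalMemory wrapper, which skips memories created with in_addr_map = False and aborts on overlapping ranges. The names below are illustrative only.

    system.physmem = SimpleMemory(range = AddrRange('1GB'))   # enters the address map
    system.funcmem = SimpleMemory(in_addr_map = False)        # functional only, skipped
    # a second memory covering the same 1GB range would trigger the
    # "is overlapping" fatal() in the PhysicalMemory constructor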
/*
- * Copyright (c) 2001-2005 The Regents of The University of Michigan
- * All rights reserved.
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Authors: Ron Dreslinski
- */
-
-/* @file
+ * Authors: Andreas Hansson
*/
#ifndef __PHYSICAL_MEMORY_HH__
#define __PHYSICAL_MEMORY_HH__
-#include <map>
-#include <string>
-
-#include "base/range.hh"
-#include "base/statistics.hh"
-#include "mem/mem_object.hh"
+#include "base/range_map.hh"
+#include "mem/abstract_mem.hh"
#include "mem/packet.hh"
-#include "mem/tport.hh"
-#include "params/PhysicalMemory.hh"
-#include "sim/eventq.hh"
-#include "sim/stats.hh"
-
-//
-// Functional model for a contiguous block of physical memory. (i.e. RAM)
-//
-class PhysicalMemory : public MemObject
-{
- protected:
-
- class MemoryPort : public SimpleTimingPort
- {
- PhysicalMemory *memory;
-
- public:
- MemoryPort(const std::string &_name, PhysicalMemory *_memory);
-
- protected:
+/**
+ * The physical memory encapsulates all memories in the system and
+ * provides basic functionality for accessing those memories without
+ * going through the memory system and interconnect.
+ */
+class PhysicalMemory
+{
- virtual Tick recvAtomic(PacketPtr pkt);
+ private:
- virtual void recvFunctional(PacketPtr pkt);
+ // Global address map
+ range_map<Addr, AbstractMemory* > addrMap;
- virtual AddrRangeList getAddrRanges();
+ // a mutable cache for the last range that matched an address
+ mutable Range<Addr> rangeCache;
- virtual unsigned deviceBlockSize() const;
- };
+ // All address-mapped memories
+ std::vector<AbstractMemory*> memories;
- int numPorts;
+ // The total memory size
+ uint64_t size;
+ // Prevent copying
+ PhysicalMemory(const PhysicalMemory&);
- private:
- // prevent copying of a MainMemory object
- PhysicalMemory(const PhysicalMemory &specmem);
- const PhysicalMemory &operator=(const PhysicalMemory &specmem);
-
- protected:
-
- class LockedAddr {
- public:
- // on alpha, minimum LL/SC granularity is 16 bytes, so lower
- // bits need to masked off.
- static const Addr Addr_Mask = 0xf;
-
- static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
-
- Addr addr; // locked address
- int contextId; // locking hw context
-
- // check for matching execution context
- bool matchesContext(Request *req)
- {
- return (contextId == req->contextId());
- }
-
- LockedAddr(Request *req)
- : addr(mask(req->getPaddr())),
- contextId(req->contextId())
- {
- }
- // constructor for unserialization use
- LockedAddr(Addr _addr, int _cid)
- : addr(_addr), contextId(_cid)
- {
- }
- };
-
- std::list<LockedAddr> lockedAddrList;
-
- // helper function for checkLockedAddrs(): we really want to
- // inline a quick check for an empty locked addr list (hopefully
- // the common case), and do the full list search (if necessary) in
- // this out-of-line function
- bool checkLockedAddrList(PacketPtr pkt);
-
- // Record the address of a load-locked operation so that we can
- // clear the execution context's lock flag if a matching store is
- // performed
- void trackLoadLocked(PacketPtr pkt);
-
- // Compare a store address with any locked addresses so we can
- // clear the lock flag appropriately. Return value set to 'false'
- // if store operation should be suppressed (because it was a
- // conditional store and the address was no longer locked by the
- // requesting execution context), 'true' otherwise. Note that
- // this method must be called on *all* stores since even
- // non-conditional stores must clear any matching lock addresses.
- bool writeOK(PacketPtr pkt) {
- Request *req = pkt->req;
- if (lockedAddrList.empty()) {
- // no locked addrs: nothing to check, store_conditional fails
- bool isLLSC = pkt->isLLSC();
- if (isLLSC) {
- req->setExtraData(0);
- }
- return !isLLSC; // only do write if not an sc
- } else {
- // iterate over list...
- return checkLockedAddrList(pkt);
- }
- }
-
- uint8_t *pmemAddr;
- Tick lat;
- Tick lat_var;
- std::vector<MemoryPort*> ports;
- typedef std::vector<MemoryPort*>::iterator PortIterator;
-
- uint64_t _size;
- uint64_t _start;
-
- /** Number of total bytes read from this memory */
- Stats::Scalar bytesRead;
- /** Number of instruction bytes read from this memory */
- Stats::Scalar bytesInstRead;
- /** Number of bytes written to this memory */
- Stats::Scalar bytesWritten;
- /** Number of read requests */
- Stats::Scalar numReads;
- /** Number of write requests */
- Stats::Scalar numWrites;
- /** Number of other requests */
- Stats::Scalar numOther;
- /** Read bandwidth from this memory */
- Stats::Formula bwRead;
- /** Read bandwidth from this memory */
- Stats::Formula bwInstRead;
- /** Write bandwidth from this memory */
- Stats::Formula bwWrite;
- /** Total bandwidth from this memory */
- Stats::Formula bwTotal;
+ // Prevent assignment
+ PhysicalMemory& operator=(const PhysicalMemory&);
public:
- uint64_t size() { return _size; }
- uint64_t start() { return _start; }
- public:
- typedef PhysicalMemoryParams Params;
- PhysicalMemory(const Params *p);
- virtual ~PhysicalMemory();
-
- const Params *
- params() const
- {
- return dynamic_cast<const Params *>(_params);
- }
-
- public:
- unsigned deviceBlockSize() const;
- AddrRangeList getAddrRanges();
- virtual SlavePort &getSlavePort(const std::string &if_name, int idx = -1);
- void virtual init();
- unsigned int drain(Event *de);
+ /**
+ * Create a physical memory object, wrapping a number of memories.
+ */
+ PhysicalMemory(const std::vector<AbstractMemory*>& _memories);
- Tick doAtomicAccess(PacketPtr pkt);
- void doFunctionalAccess(PacketPtr pkt);
+ /**
+ * Nothing to destruct.
+ */
+ ~PhysicalMemory() { }
+
+ /**
+ * Check if a physical address is within a range of a memory that
+ * is part of the global address map.
+ *
+ * @param addr A physical address
+ * @return Whether the address corresponds to a memory
+ */
+ bool isMemAddr(Addr addr) const;
+ /**
+ * Get the memory ranges for all memories that are to be reported
+ * to the configuration table.
+ *
+ * @return All configuration table memory ranges
+ */
+ AddrRangeList getConfAddrRanges() const;
- protected:
- virtual Tick calculateLatency(PacketPtr pkt);
+ /**
+ * Get the total physical memory size.
+ *
+ * @return The sum of all memory sizes
+ */
+ uint64_t totalSize() const { return size; }
- public:
- /**
- * Register Statistics
+    /**
+     * Serve a request by handing it to the memory covering the packet
+     * address: access() performs a normal untimed access that updates
+     * state and statistics, functionalAccess() only touches the data.
*/
- void regStats();
+ void access(PacketPtr pkt);
+ void functionalAccess(PacketPtr pkt);
+};
+
- virtual void serialize(std::ostream &os);
-    virtual void unserialize(Checkpoint *cp, const std::string &section);
-};
#endif //__PHYSICAL_MEMORY_HH__
// The following command performs the real functional access.
// This line should be removed once Ruby supplies the official version
// of data.
- ruby_port->system->physmem->doFunctionalAccess(pkt);
+ ruby_port->system->getPhysMem().functionalAccess(pkt);
}
// turn packet around to go back to requester if response expected
DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);
if (accessPhysMem) {
- ruby_port->system->physmem->doAtomicAccess(pkt);
+ ruby_port->system->getPhysMem().access(pkt);
} else if (needsResponse) {
pkt->makeResponse();
}
bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
- return ruby_port->system->isMemory(addr);
+ return ruby_port->system->isMemAddr(addr);
}
unsigned
--- /dev/null
+/*
+ * Copyright (c) 2010-2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Ali Saidi
+ * Andreas Hansson
+ */
+
+#include "base/random.hh"
+#include "mem/simple_mem.hh"
+
+using namespace std;
+
+SimpleMemory::SimpleMemory(const Params* p) :
+ AbstractMemory(p),
+ lat(p->latency), lat_var(p->latency_var)
+{
+ for (size_t i = 0; i < p->port_port_connection_count; ++i) {
+ ports.push_back(new MemoryPort(csprintf("%s-port-%d", name(), i),
+ *this));
+ }
+}
+
+void
+SimpleMemory::init()
+{
+ for (vector<MemoryPort*>::iterator p = ports.begin(); p != ports.end();
+ ++p) {
+ if (!(*p)->isConnected()) {
+ fatal("SimpleMemory port %s is unconnected!\n", (*p)->name());
+ } else {
+ (*p)->sendRangeChange();
+ }
+ }
+}
+
+Tick
+SimpleMemory::calculateLatency(PacketPtr pkt)
+{
+ if (pkt->memInhibitAsserted()) {
+ return 0;
+ } else {
+ Tick latency = lat;
+ if (lat_var != 0)
+ latency += random_mt.random<Tick>(0, lat_var);
+ return latency;
+ }
+}
+
+Tick
+SimpleMemory::doAtomicAccess(PacketPtr pkt)
+{
+ access(pkt);
+ return calculateLatency(pkt);
+}
+
+void
+SimpleMemory::doFunctionalAccess(PacketPtr pkt)
+{
+ functionalAccess(pkt);
+}
+
+SlavePort &
+SimpleMemory::getSlavePort(const std::string &if_name, int idx)
+{
+ if (if_name != "port") {
+ return MemObject::getSlavePort(if_name, idx);
+ } else {
+ if (idx >= static_cast<int>(ports.size())) {
+ fatal("SimpleMemory::getSlavePort: unknown index %d\n", idx);
+ }
+
+ return *ports[idx];
+ }
+}
+
+unsigned int
+SimpleMemory::drain(Event *de)
+{
+ int count = 0;
+ for (vector<MemoryPort*>::iterator p = ports.begin(); p != ports.end();
+ ++p) {
+ count += (*p)->drain(de);
+ }
+
+ if (count)
+ changeState(Draining);
+ else
+ changeState(Drained);
+ return count;
+}
+
+SimpleMemory::MemoryPort::MemoryPort(const std::string& _name,
+ SimpleMemory& _memory)
+ : SimpleTimingPort(_name, &_memory), memory(_memory)
+{ }
+
+AddrRangeList
+SimpleMemory::MemoryPort::getAddrRanges()
+{
+ AddrRangeList ranges;
+ ranges.push_back(memory.getAddrRange());
+ return ranges;
+}
+
+Tick
+SimpleMemory::MemoryPort::recvAtomic(PacketPtr pkt)
+{
+ return memory.doAtomicAccess(pkt);
+}
+
+void
+SimpleMemory::MemoryPort::recvFunctional(PacketPtr pkt)
+{
+ pkt->pushLabel(memory.name());
+
+ if (!queue.checkFunctional(pkt)) {
+ // Default implementation of SimpleTimingPort::recvFunctional()
+ // calls recvAtomic() and throws away the latency; we can save a
+ // little here by just not calculating the latency.
+ memory.doFunctionalAccess(pkt);
+ }
+
+ pkt->popLabel();
+}
+
+SimpleMemory*
+SimpleMemoryParams::create()
+{
+ return new SimpleMemory(this);
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Andreas Hansson
+ */
+
+/**
+ * @file
+ * SimpleMemory declaration
+ */
+
+#ifndef __SIMPLE_MEMORY_HH__
+#define __SIMPLE_MEMORY_HH__
+
+#include "mem/abstract_mem.hh"
+#include "mem/tport.hh"
+#include "params/SimpleMemory.hh"
+
+/**
+ * The simple memory is a basic multi-ported memory with an infinite
+ * throughput and a fixed latency, potentially with a variance added
+ * to it. It uses a SimpleTimingPort to implement the timing accesses.
+ */
+class SimpleMemory : public AbstractMemory
+{
+
+ private:
+
+ class MemoryPort : public SimpleTimingPort
+ {
+ SimpleMemory& memory;
+
+ public:
+
+ MemoryPort(const std::string& _name, SimpleMemory& _memory);
+
+ protected:
+
+ virtual Tick recvAtomic(PacketPtr pkt);
+
+ virtual void recvFunctional(PacketPtr pkt);
+
+ virtual AddrRangeList getAddrRanges();
+
+ };
+
+ std::vector<MemoryPort*> ports;
+
+ Tick lat;
+ Tick lat_var;
+
+ public:
+
+ typedef SimpleMemoryParams Params;
+ SimpleMemory(const Params *p);
+ virtual ~SimpleMemory() { }
+
+ unsigned int drain(Event* de);
+
+ virtual SlavePort& getSlavePort(const std::string& if_name, int idx = -1);
+ virtual void init();
+
+ const Params *
+ params() const
+ {
+ return dynamic_cast<const Params *>(_params);
+ }
+
+ protected:
+
+ Tick doAtomicAccess(PacketPtr pkt);
+ void doFunctionalAccess(PacketPtr pkt);
+ virtual Tick calculateLatency(PacketPtr pkt);
+
+};
+
+#endif //__SIMPLE_MEMORY_HH__
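A brief Python sketch of configuring the new SimpleMemory: latency appears as a parameter elsewhere in this series, and latency_var is assumed to mirror the p->latency_var field read in simple_mem.cc above; the object names are illustrative.

    # fixed latency plus a uniformly distributed per-access variance
    system.physmem = SimpleMemory(range = AddrRange('512MB'),
                                  latency = '30ns',
                                  latency_var = '10ns')
    system.physmem.port = system.membus.master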
from m5.params import *
from m5.proxy import *
-from PhysicalMemory import *
+from SimpleMemory import *
class MemoryMode(Enum): vals = ['invalid', 'atomic', 'timing']
void setMemoryMode(Enums::MemoryMode mode);
''')
- physmem = Param.PhysicalMemory("Physical Memory")
+ memories = VectorParam.AbstractMemory(Self.all,
+ "All memories in the system")
mem_mode = Param.MemoryMode('atomic', "The mode the memory system is in")
- memories = VectorParam.PhysicalMemory(Self.all, "All memories is the system")
work_item_id = Param.Int(-1, "specific work item id")
num_work_ids = Param.Int(16, "Number of distinct work item types")
work_begin_cpu_id_exit = Param.Int(-1,
System::System(Params *p)
: MemObject(p), _systemPort("system_port", this),
- physmem(p->physmem),
_numContexts(0),
pagePtr(0),
init_param(p->init_param),
virtProxy(_systemPort),
loadAddrMask(p->load_addr_mask),
nextPID(0),
+ physmem(p->memories),
memoryMode(p->mem_mode),
workItemsBegin(0),
workItemsEnd(0),
// add self to global system list
systemList.push_back(this);
- /** Keep track of all memories we can execute code out of
- * in our system
- */
- for (int x = 0; x < p->memories.size(); x++) {
- if (!p->memories[x])
- continue;
- memRanges.push_back(RangeSize(p->memories[x]->start(),
- p->memories[x]->size()));
- }
-
if (FullSystem) {
kernelSymtab = new SymbolTable;
if (!debugSymbolTable)
{
Addr return_addr = pagePtr << LogVMPageSize;
pagePtr += npages;
- if (pagePtr > physmem->size())
+ if (pagePtr > physmem.totalSize())
fatal("Out of memory, please increase size of physical memory.");
return return_addr;
}
Addr
-System::memSize()
+System::memSize() const
{
- return physmem->size();
+ return physmem.totalSize();
}
Addr
-System::freeMemSize()
+System::freeMemSize() const
{
- return physmem->size() - (pagePtr << LogVMPageSize);
+ return physmem.totalSize() - (pagePtr << LogVMPageSize);
}
bool
-System::isMemory(const Addr addr) const
+System::isMemAddr(Addr addr) const
{
- std::list<Range<Addr> >::const_iterator i;
- for (i = memRanges.begin(); i != memRanges.end(); i++) {
- if (*i == addr)
- return true;
- }
- return false;
+ return physmem.isMemAddr(addr);
}
void
#include "mem/fs_translating_port_proxy.hh"
#include "mem/mem_object.hh"
#include "mem/port.hh"
+#include "mem/physical.hh"
#include "params/System.hh"
class BaseCPU;
class BaseRemoteGDB;
class GDBListener;
class ObjectFile;
-class PhysicalMemory;
class Platform;
class ThreadContext;
*/
void setMemoryMode(Enums::MemoryMode mode);
- PhysicalMemory *physmem;
PCEventQueue pcEventQueue;
std::vector<ThreadContext *> threadContexts;
* system. These threads could be Active or Suspended. */
int numRunningContexts();
- /** List to store ranges of memories in this system */
- AddrRangeList memRanges;
-
- /** check if an address points to valid system memory
- * and thus we can fetch instructions out of it
- */
- bool isMemory(const Addr addr) const;
-
Addr pagePtr;
uint64_t init_param;
return nextPID++;
}
+    /** Get a reference to the physical memory of the system */
+ PhysicalMemory& getPhysMem() { return physmem; }
+
/** Amount of physical memory that is still free */
- Addr freeMemSize();
+ Addr freeMemSize() const;
/** Amount of physical memory that exists */
- Addr memSize();
+ Addr memSize() const;
+
+ /**
+ * Check if a physical address is within a range of a memory that
+ * is part of the global address map.
+ *
+ * @param addr A physical address
+ * @return Whether the address corresponds to a memory
+ */
+ bool isMemAddr(Addr addr) const;
protected:
+
+ PhysicalMemory physmem;
+
Enums::MemoryMode memoryMode;
uint64_t workItemsBegin;
uint64_t workItemsEnd;
cpu.clock = '2GHz'
system = System(cpu = cpu,
- physmem = PhysicalMemory(),
+ physmem = SimpleMemory(),
membus = Bus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
# system simulated
system = System(cpu = cpus,
- funcmem = PhysicalMemory(),
- physmem = PhysicalMemory())
+ funcmem = SimpleMemory(in_addr_map = False),
+ physmem = SimpleMemory())
Ruby.create_system(options, system)
cpus = [ MemTest() for i in xrange(nb_cores) ]
# system simulated
-system = System(cpu = cpus, funcmem = PhysicalMemory(),
- physmem = PhysicalMemory(),
+system = System(cpu = cpus, funcmem = SimpleMemory(in_addr_map = False),
+ physmem = SimpleMemory(),
membus = Bus(clock="500GHz", width=16))
# l2cache & bus
cpu.clock = '2GHz'
system = System(cpu = cpu,
- physmem = PhysicalMemory(),
+ physmem = SimpleMemory(),
membus = Bus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
cpus = [ DerivO3CPU(cpu_id=i) for i in xrange(nb_cores) ]
# system simulated
-system = System(cpu = cpus, physmem = PhysicalMemory(), membus =
-Bus())
+system = System(cpu = cpus, physmem = SimpleMemory(), membus = Bus())
# l2cache & bus
system.toL2Bus = Bus()
cpu.clock = '2GHz'
system = System(cpu = cpu,
- physmem = PhysicalMemory(),
+ physmem = SimpleMemory(),
membus = Bus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
#
tester = RubyTester(checks_to_complete = 100, wakeup_frequency = 10)
-system = System(tester = tester, physmem = PhysicalMemory())
+system = System(tester = tester, physmem = SimpleMemory())
Ruby.create_system(options, system)
from m5.objects import *
system = System(cpu = AtomicSimpleCPU(cpu_id=0),
- physmem = PhysicalMemory(),
+ physmem = SimpleMemory(),
membus = Bus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
cpus = [ AtomicSimpleCPU(cpu_id=i) for i in xrange(nb_cores) ]
# system simulated
-system = System(cpu = cpus, physmem = PhysicalMemory(range = AddrRange('1024MB')), membus =
-Bus())
+system = System(cpu = cpus,
+ physmem = SimpleMemory(range = AddrRange('1024MB')),
+ membus = Bus())
# l2cache & bus
system.toL2Bus = Bus()
from m5.objects import *
system = System(cpu = AtomicSimpleCPU(cpu_id=0),
- physmem = PhysicalMemory(),
+ physmem = SimpleMemory(),
membus = Bus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
options.num_cpus = nb_cores
# system simulated
-system = System(cpu = cpus, physmem = PhysicalMemory())
+system = System(cpu = cpus, physmem = SimpleMemory())
Ruby.create_system(options, system)
cpus = [ TimingSimpleCPU(cpu_id=i) for i in xrange(nb_cores) ]
# system simulated
-system = System(cpu = cpus, physmem = PhysicalMemory(), membus =
-Bus())
+system = System(cpu = cpus, physmem = SimpleMemory(), membus = Bus())
# l2cache & bus
system.toL2Bus = Bus()
options.num_cpus = 1
cpu = TimingSimpleCPU(cpu_id=0)
-system = System(cpu = cpu, physmem = PhysicalMemory())
+system = System(cpu = cpu, physmem = SimpleMemory())
Ruby.create_system(options, system)
MyL1Cache(size = '256kB'),
MyCache(size = '2MB', latency='10ns'))
system = System(cpu = cpu,
- physmem = PhysicalMemory(),
+ physmem = SimpleMemory(),
membus = Bus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master