/*
- * Copyright (c) 2001-2005 The Regents of The University of Michigan
- * All rights reserved.
+ * Copyright (c) 2012, 2014, 2018 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Authors: Ron Dreslinski
- * Ali Saidi
+ * Authors: Andreas Hansson
*/
-#include <sys/types.h>
-#include <sys/mman.h>
-#include <errno.h>
+#include "mem/physical.hh"
+
#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>
+#include <cerrno>
+#include <climits>
+#include <cstdio>
#include <iostream>
#include <string>
-#include "arch/isa_traits.hh"
-#include "base/misc.hh"
-#include "config/full_system.hh"
-#include "mem/packet_access.hh"
-#include "mem/physical.hh"
-#include "sim/eventq.hh"
-#include "sim/host.hh"
+#include "base/trace.hh"
+#include "debug/AddrRanges.hh"
+#include "debug/Checkpoint.hh"
+#include "mem/abstract_mem.hh"
+
+/**
+ * On Linux, MAP_NORESERVE allows us to simulate a very large memory
+ * without committing to actually providing the swap space on the
+ * host. On FreeBSD or OSX the MAP_NORESERVE flag does not exist,
+ * so simply make it 0.
+ */
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#ifndef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+#endif
using namespace std;
-using namespace TheISA;
-PhysicalMemory::PhysicalMemory(const Params *p)
- : MemObject(p), pmemAddr(NULL), lat(p->latency)
+PhysicalMemory::PhysicalMemory(const string& _name,
+ const vector<AbstractMemory*>& _memories,
+ bool mmap_using_noreserve) :
+ _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve)
{
- if (params()->range.size() % TheISA::PageBytes != 0)
- panic("Memory Size not divisible by page size\n");
-
- int map_flags = MAP_ANON | MAP_PRIVATE;
- pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
- PROT_READ | PROT_WRITE, map_flags, -1, 0);
-
- if (pmemAddr == (void *)MAP_FAILED) {
- perror("mmap");
- fatal("Could not mmap!\n");
+ if (mmap_using_noreserve)
+ warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");
+
+ // add the memories from the system to the address map as
+ // appropriate
+ for (const auto& m : _memories) {
+ // only add the memory if it is part of the global address map
+ if (m->isInAddrMap()) {
+ memories.push_back(m);
+
+ // calculate the total size once and for all
+ size += m->size();
+
+ // add the range to our interval tree and make sure it does not
+ // intersect an existing range
+ fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
+ "Memory address range for %s is overlapping\n",
+ m->name());
+ } else {
+ // this type of memory is used e.g. as reference memory by
+            // Ruby, and it also needs a backing store, but should
+ // not be part of the global address map
+ DPRINTF(AddrRanges,
+ "Skipping memory %s that is not in global address map\n",
+ m->name());
+
+ // sanity check
+ fatal_if(m->getAddrRange().interleaved(),
+ "Memory %s that is not in the global address map cannot "
+ "be interleaved\n", m->name());
+
+            // simply do it independently, also note that these
+            // kinds of memories are allowed to overlap in the
+            // logical address map
+ vector<AbstractMemory*> unmapped_mems{m};
+ createBackingStore(m->getAddrRange(), unmapped_mems,
+ m->isConfReported(), m->isInAddrMap(),
+ m->isKvmMap());
+ }
}
- //If requested, initialize all the memory to 0
- if (p->zero)
- memset(pmemAddr, 0, p->range.size());
-
- pagePtr = 0;
-
- cachedSize = params()->range.size();
- cachedStart = params()->range.start;
+ // iterate over the increasing addresses and chunks of contiguous
+ // space to be mapped to backing store, create it and inform the
+ // memories
+ vector<AddrRange> intlv_ranges;
+ vector<AbstractMemory*> curr_memories;
+ for (const auto& r : addrMap) {
+ // simply skip past all memories that are null and hence do
+ // not need any backing store
+ if (!r.second->isNull()) {
+ // if the range is interleaved then save it for now
+ if (r.first.interleaved()) {
+ // if we already got interleaved ranges that are not
+ // part of the same range, then first do a merge
+ // before we add the new one
+ if (!intlv_ranges.empty() &&
+ !intlv_ranges.back().mergesWith(r.first)) {
+ AddrRange merged_range(intlv_ranges);
+
+ AbstractMemory *f = curr_memories.front();
+ for (const auto& c : curr_memories)
+ if (f->isConfReported() != c->isConfReported() ||
+ f->isInAddrMap() != c->isInAddrMap() ||
+ f->isKvmMap() != c->isKvmMap())
+ fatal("Inconsistent flags in an interleaved "
+ "range\n");
+
+ createBackingStore(merged_range, curr_memories,
+ f->isConfReported(), f->isInAddrMap(),
+ f->isKvmMap());
+
+ intlv_ranges.clear();
+ curr_memories.clear();
+ }
+ intlv_ranges.push_back(r.first);
+ curr_memories.push_back(r.second);
+ } else {
+ vector<AbstractMemory*> single_memory{r.second};
+ createBackingStore(r.first, single_memory,
+ r.second->isConfReported(),
+ r.second->isInAddrMap(),
+ r.second->isKvmMap());
+ }
+ }
+ }
+    // if there are still interleaved ranges waiting to be merged, go
+ // ahead and do it
+ if (!intlv_ranges.empty()) {
+ AddrRange merged_range(intlv_ranges);
+
+ AbstractMemory *f = curr_memories.front();
+ for (const auto& c : curr_memories)
+ if (f->isConfReported() != c->isConfReported() ||
+ f->isInAddrMap() != c->isInAddrMap() ||
+ f->isKvmMap() != c->isKvmMap())
+ fatal("Inconsistent flags in an interleaved "
+ "range\n");
+
+ createBackingStore(merged_range, curr_memories,
+ f->isConfReported(), f->isInAddrMap(),
+ f->isKvmMap());
+ }
}
void
-PhysicalMemory::init()
+PhysicalMemory::createBackingStore(AddrRange range,
+ const vector<AbstractMemory*>& _memories,
+ bool conf_table_reported,
+ bool in_addr_map, bool kvm_map)
{
- if (ports.size() == 0) {
- fatal("PhysicalMemory object %s is unconnected!", name());
- }
+ panic_if(range.interleaved(),
+ "Cannot create backing store for interleaved range %s\n",
+ range.to_string());
+
+ // perform the actual mmap
+ DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
+ range.to_string(), range.size());
+ int map_flags = MAP_ANON | MAP_PRIVATE;
- for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
- if (*pi)
- (*pi)->sendStatusChange(Port::RangeChange);
+ // to be able to simulate very large memories, the user can opt to
+ // pass noreserve to mmap
+ if (mmapUsingNoReserve) {
+ map_flags |= MAP_NORESERVE;
}
-}
-PhysicalMemory::~PhysicalMemory()
-{
- if (pmemAddr)
- munmap((char*)pmemAddr, params()->range.size());
- //Remove memPorts?
-}
+ uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
+ PROT_READ | PROT_WRITE,
+ map_flags, -1, 0);
-Addr
-PhysicalMemory::new_page()
-{
- Addr return_addr = pagePtr << LogVMPageSize;
- return_addr += start();
+ if (pmem == (uint8_t*) MAP_FAILED) {
+ perror("mmap");
+ fatal("Could not mmap %d bytes for range %s!\n", range.size(),
+ range.to_string());
+ }
- ++pagePtr;
- return return_addr;
-}
+ // remember this backing store so we can checkpoint it and unmap
+ // it appropriately
+ backingStore.emplace_back(range, pmem,
+ conf_table_reported, in_addr_map, kvm_map);
-int
-PhysicalMemory::deviceBlockSize()
-{
- //Can accept anysize request
- return 0;
+ // point the memories to their backing store
+ for (const auto& m : _memories) {
+ DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
+ m->name());
+ m->setBackingStore(pmem);
+ }
}
-Tick
-PhysicalMemory::calculateLatency(PacketPtr pkt)
+PhysicalMemory::~PhysicalMemory()
{
- return lat;
+ // unmap the backing store
+ for (auto& s : backingStore)
+ munmap((char*)s.pmem, s.range.size());
}
-
-
-// Add load-locked to tracking list. Should only be called if the
-// operation is a load and the LOCKED flag is set.
-void
-PhysicalMemory::trackLoadLocked(PacketPtr pkt)
+bool
+PhysicalMemory::isMemAddr(Addr addr) const
{
- Request *req = pkt->req;
- Addr paddr = LockedAddr::mask(req->getPaddr());
-
- // first we check if we already have a locked addr for this
- // xc. Since each xc only gets one, we just update the
- // existing record with the new address.
- list<LockedAddr>::iterator i;
-
- for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
- if (i->matchesContext(req)) {
- DPRINTF(LLSC, "Modifying lock record: cpu %d thread %d addr %#x\n",
- req->getCpuNum(), req->getThreadNum(), paddr);
- i->addr = paddr;
- return;
- }
- }
-
- // no record for this xc: need to allocate a new one
- DPRINTF(LLSC, "Adding lock record: cpu %d thread %d addr %#x\n",
- req->getCpuNum(), req->getThreadNum(), paddr);
- lockedAddrList.push_front(LockedAddr(req));
+ return addrMap.contains(addr) != addrMap.end();
}
-
-// Called on *writes* only... both regular stores and
-// store-conditional operations. Check for conventional stores which
-// conflict with locked addresses, and for success/failure of store
-// conditionals.
-bool
-PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
+AddrRangeList
+PhysicalMemory::getConfAddrRanges() const
{
- Request *req = pkt->req;
- Addr paddr = LockedAddr::mask(req->getPaddr());
- bool isLocked = pkt->isLocked();
-
- // Initialize return value. Non-conditional stores always
- // succeed. Assume conditional stores will fail until proven
- // otherwise.
- bool success = !isLocked;
-
- // Iterate over list. Note that there could be multiple matching
- // records, as more than one context could have done a load locked
- // to this location.
- list<LockedAddr>::iterator i = lockedAddrList.begin();
-
- while (i != lockedAddrList.end()) {
-
- if (i->addr == paddr) {
- // we have a matching address
-
- if (isLocked && i->matchesContext(req)) {
- // it's a store conditional, and as far as the memory
- // system can tell, the requesting context's lock is
- // still valid.
- DPRINTF(LLSC, "StCond success: cpu %d thread %d addr %#x\n",
- req->getCpuNum(), req->getThreadNum(), paddr);
- success = true;
+ // this could be done once in the constructor, but since it is unlikely to
+ // be called more than once the iteration should not be a problem
+ AddrRangeList ranges;
+ vector<AddrRange> intlv_ranges;
+ for (const auto& r : addrMap) {
+ if (r.second->isConfReported()) {
+ // if the range is interleaved then save it for now
+ if (r.first.interleaved()) {
+ // if we already got interleaved ranges that are not
+ // part of the same range, then first do a merge
+ // before we add the new one
+ if (!intlv_ranges.empty() &&
+ !intlv_ranges.back().mergesWith(r.first)) {
+ ranges.push_back(AddrRange(intlv_ranges));
+ intlv_ranges.clear();
+ }
+ intlv_ranges.push_back(r.first);
+ } else {
+ // keep the current range
+ ranges.push_back(r.first);
}
-
- // Get rid of our record of this lock and advance to next
- DPRINTF(LLSC, "Erasing lock record: cpu %d thread %d addr %#x\n",
- i->cpuNum, i->threadNum, paddr);
- i = lockedAddrList.erase(i);
- }
- else {
- // no match: advance to next record
- ++i;
}
}
- if (isLocked) {
- req->setExtraData(success ? 1 : 0);
+    // if there are still interleaved ranges waiting to be merged,
+ // go ahead and do it
+ if (!intlv_ranges.empty()) {
+ ranges.push_back(AddrRange(intlv_ranges));
}
- return success;
+ return ranges;
}
-
-#if TRACING_ON
-
-#define CASE(A, T) \
- case sizeof(T): \
- DPRINTF(MemoryAccess, A " of size %i on address 0x%x data 0x%x\n", \
- pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \
- break
-
-
-#define TRACE_PACKET(A) \
- do { \
- switch (pkt->getSize()) { \
- CASE(A, uint64_t); \
- CASE(A, uint32_t); \
- CASE(A, uint16_t); \
- CASE(A, uint8_t); \
- default: \
- DPRINTF(MemoryAccess, A " of size %i on address 0x%x\n", \
- pkt->getSize(), pkt->getAddr()); \
- } \
- } while (0)
-
-#else
-
-#define TRACE_PACKET(A)
-
-#endif
-
-Tick
-PhysicalMemory::doAtomicAccess(PacketPtr pkt)
+void
+PhysicalMemory::access(PacketPtr pkt)
{
- assert(pkt->getAddr() >= start() &&
- pkt->getAddr() + pkt->getSize() <= start() + size());
-
- if (pkt->memInhibitAsserted()) {
- DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
- pkt->getAddr());
- return 0;
- }
-
- uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();
-
- if (pkt->cmd == MemCmd::SwapReq) {
- IntReg overwrite_val;
- bool overwrite_mem;
- uint64_t condition_val64;
- uint32_t condition_val32;
-
- assert(sizeof(IntReg) >= pkt->getSize());
-
- overwrite_mem = true;
- // keep a copy of our possible write value, and copy what is at the
- // memory address into the packet
- std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
- std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
-
- if (pkt->req->isCondSwap()) {
- if (pkt->getSize() == sizeof(uint64_t)) {
- condition_val64 = pkt->req->getExtraData();
- overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
- sizeof(uint64_t));
- } else if (pkt->getSize() == sizeof(uint32_t)) {
- condition_val32 = (uint32_t)pkt->req->getExtraData();
- overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
- sizeof(uint32_t));
- } else
- panic("Invalid size for conditional read/write\n");
- }
-
- if (overwrite_mem)
- std::memcpy(hostAddr, &overwrite_val, pkt->getSize());
-
- TRACE_PACKET("Read/Write");
- } else if (pkt->isRead()) {
- assert(!pkt->isWrite());
- if (pkt->isLocked()) {
- trackLoadLocked(pkt);
- }
- memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
- TRACE_PACKET("Read");
- } else if (pkt->isWrite()) {
- if (writeOK(pkt)) {
- memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
- TRACE_PACKET("Write");
- }
- } else if (pkt->isInvalidate()) {
- //upgrade or invalidate
- if (pkt->needsResponse()) {
- pkt->makeAtomicResponse();
- }
- } else {
- panic("unimplemented");
- }
-
- if (pkt->needsResponse()) {
- pkt->makeAtomicResponse();
- }
- return calculateLatency(pkt);
+ assert(pkt->isRequest());
+ const auto& m = addrMap.contains(pkt->getAddrRange());
+ assert(m != addrMap.end());
+ m->second->access(pkt);
}
-
void
-PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
+PhysicalMemory::functionalAccess(PacketPtr pkt)
{
- assert(pkt->getAddr() >= start() &&
- pkt->getAddr() + pkt->getSize() <= start() + size());
-
-
- uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();
-
- if (pkt->cmd == MemCmd::ReadReq) {
- memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
- TRACE_PACKET("Read");
- } else if (pkt->cmd == MemCmd::WriteReq) {
- memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
- TRACE_PACKET("Write");
- } else {
- panic("PhysicalMemory: unimplemented functional command %s",
- pkt->cmdString());
- }
-
- pkt->makeAtomicResponse();
+ assert(pkt->isRequest());
+ const auto& m = addrMap.contains(pkt->getAddrRange());
+ assert(m != addrMap.end());
+ m->second->functionalAccess(pkt);
}
-
-Port *
-PhysicalMemory::getPort(const std::string &if_name, int idx)
+void
+PhysicalMemory::serialize(CheckpointOut &cp) const
{
- // Accept request for "functional" port for backwards compatibility
- // with places where this function is called from C++. I'd prefer
- // to move all these into Python someday.
- if (if_name == "functional") {
- return new MemoryPort(csprintf("%s-functional", name()), this);
+ // serialize all the locked addresses and their context ids
+ vector<Addr> lal_addr;
+ vector<ContextID> lal_cid;
+
+ for (auto& m : memories) {
+ const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
+ for (const auto& l : locked_addrs) {
+ lal_addr.push_back(l.addr);
+ lal_cid.push_back(l.contextId);
+ }
}
- if (if_name != "port") {
- panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
- }
+ SERIALIZE_CONTAINER(lal_addr);
+ SERIALIZE_CONTAINER(lal_cid);
- if (idx >= ports.size()) {
- ports.resize(idx+1);
- }
+ // serialize the backing stores
+ unsigned int nbr_of_stores = backingStore.size();
+ SERIALIZE_SCALAR(nbr_of_stores);
- if (ports[idx] != NULL) {
- panic("PhysicalMemory::getPort: port %d already assigned", idx);
+ unsigned int store_id = 0;
+ // store each backing store memory segment in a file
+ for (auto& s : backingStore) {
+ ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
+ serializeStore(cp, store_id++, s.range, s.pmem);
}
-
- MemoryPort *port =
- new MemoryPort(csprintf("%s-port%d", name(), idx), this);
-
- ports[idx] = port;
- return port;
}
-
void
-PhysicalMemory::recvStatusChange(Port::Status status)
+PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
+ AddrRange range, uint8_t* pmem) const
{
-}
+ // we cannot use the address range for the name as the
+ // memories that are not part of the address map can overlap
+ string filename = name() + ".store" + to_string(store_id) + ".pmem";
+ long range_size = range.size();
-PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
- PhysicalMemory *_memory)
- : SimpleTimingPort(_name), memory(_memory)
-{ }
+ DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
+ filename, range_size);
-void
-PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
-{
- memory->recvStatusChange(status);
-}
+ SERIALIZE_SCALAR(store_id);
+ SERIALIZE_SCALAR(filename);
+ SERIALIZE_SCALAR(range_size);
-void
-PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
- bool &snoop)
-{
- memory->getAddressRanges(resp, snoop);
-}
+ // write memory file
+ string filepath = CheckpointIn::dir() + "/" + filename.c_str();
+ gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
+ if (compressed_mem == NULL)
+ fatal("Can't open physical memory checkpoint file '%s'\n",
+ filename);
-void
-PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop)
-{
- snoop = false;
- resp.clear();
- resp.push_back(RangeSize(start(), params()->range.size()));
-}
+ uint64_t pass_size = 0;
-int
-PhysicalMemory::MemoryPort::deviceBlockSize()
-{
- return memory->deviceBlockSize();
-}
+ // gzwrite fails if (int)len < 0 (gzwrite returns int)
+ for (uint64_t written = 0; written < range.size();
+ written += pass_size) {
+ pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
+ (uint64_t)INT_MAX : (range.size() - written);
-Tick
-PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
-{
- return memory->doAtomicAccess(pkt);
-}
-
-void
-PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
-{
- if (!checkFunctional(pkt)) {
- // Default implementation of SimpleTimingPort::recvFunctional()
- // calls recvAtomic() and throws away the latency; we can save a
- // little here by just not calculating the latency.
- memory->doFunctionalAccess(pkt);
+ if (gzwrite(compressed_mem, pmem + written,
+ (unsigned int) pass_size) != (int) pass_size) {
+ fatal("Write failed on physical memory checkpoint file '%s'\n",
+ filename);
+ }
}
-}
-unsigned int
-PhysicalMemory::drain(Event *de)
-{
- int count = 0;
- for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
- count += (*pi)->drain(de);
- }
+ // close the compressed stream and check that the exit status
+ // is zero
+ if (gzclose(compressed_mem))
+ fatal("Close failed on physical memory checkpoint file '%s'\n",
+ filename);
- if (count)
- changeState(Draining);
- else
- changeState(Drained);
- return count;
}
void
-PhysicalMemory::serialize(ostream &os)
+PhysicalMemory::unserialize(CheckpointIn &cp)
{
- gzFile compressedMem;
- string filename = name() + ".physmem";
-
- SERIALIZE_SCALAR(filename);
-
- // write memory file
- string thefile = Checkpoint::dir() + "/" + filename.c_str();
- int fd = creat(thefile.c_str(), 0664);
- if (fd < 0) {
- perror("creat");
- fatal("Can't open physical memory checkpoint file '%s'\n", filename);
+ // unserialize the locked addresses and map them to the
+ // appropriate memory controller
+ vector<Addr> lal_addr;
+ vector<ContextID> lal_cid;
+ UNSERIALIZE_CONTAINER(lal_addr);
+ UNSERIALIZE_CONTAINER(lal_cid);
+ for (size_t i = 0; i < lal_addr.size(); ++i) {
+ const auto& m = addrMap.contains(lal_addr[i]);
+ m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
}
- compressedMem = gzdopen(fd, "wb");
- if (compressedMem == NULL)
- fatal("Insufficient memory to allocate compression state for %s\n",
- filename);
+ // unserialize the backing stores
+ unsigned int nbr_of_stores;
+ UNSERIALIZE_SCALAR(nbr_of_stores);
- if (gzwrite(compressedMem, pmemAddr, params()->range.size()) !=
- params()->range.size()) {
- fatal("Write failed on physical memory checkpoint file '%s'\n",
- filename);
+ for (unsigned int i = 0; i < nbr_of_stores; ++i) {
+ ScopedCheckpointSection sec(cp, csprintf("store%d", i));
+ unserializeStore(cp);
}
- if (gzclose(compressedMem))
- fatal("Close failed on physical memory checkpoint file '%s'\n",
- filename);
}
void
-PhysicalMemory::unserialize(Checkpoint *cp, const string §ion)
+PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
- gzFile compressedMem;
- long *tempPage;
- long *pmem_current;
- uint64_t curSize;
- uint32_t bytesRead;
- const int chunkSize = 16384;
+ const uint32_t chunk_size = 16384;
+ unsigned int store_id;
+ UNSERIALIZE_SCALAR(store_id);
string filename;
-
UNSERIALIZE_SCALAR(filename);
-
- filename = cp->cptDir + "/" + filename;
+ string filepath = cp.cptDir + "/" + filename;
// mmap memoryfile
- int fd = open(filename.c_str(), O_RDONLY);
- if (fd < 0) {
- perror("open");
+ gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
+ if (compressed_mem == NULL)
fatal("Can't open physical memory checkpoint file '%s'", filename);
- }
-
- compressedMem = gzdopen(fd, "rb");
- if (compressedMem == NULL)
- fatal("Insufficient memory to allocate compression state for %s\n",
- filename);
-
- // unmap file that was mmaped in the constructor
- // This is done here to make sure that gzip and open don't muck with our
- // nice large space of memory before we reallocate it
- munmap((char*)pmemAddr, params()->range.size());
-
- pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
- PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
- if (pmemAddr == (void *)MAP_FAILED) {
- perror("mmap");
- fatal("Could not mmap physical memory!\n");
- }
-
- curSize = 0;
- tempPage = (long*)malloc(chunkSize);
- if (tempPage == NULL)
- fatal("Unable to malloc memory to read file %s\n", filename);
-
- /* Only copy bytes that are non-zero, so we don't give the VM system hell */
- while (curSize < params()->range.size()) {
- bytesRead = gzread(compressedMem, tempPage, chunkSize);
- if (bytesRead != chunkSize &&
- bytesRead != params()->range.size() - curSize)
- fatal("Read failed on physical memory checkpoint file '%s'"
- " got %d bytes, expected %d or %d bytes\n",
- filename, bytesRead, chunkSize,
- params()->range.size() - curSize);
-
- assert(bytesRead % sizeof(long) == 0);
-
- for (int x = 0; x < bytesRead/sizeof(long); x++)
- {
- if (*(tempPage+x) != 0) {
- pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
- *pmem_current = *(tempPage+x);
- }
+ // we've already got the actual backing store mapped
+ uint8_t* pmem = backingStore[store_id].pmem;
+ AddrRange range = backingStore[store_id].range;
+
+ long range_size;
+ UNSERIALIZE_SCALAR(range_size);
+
+ DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
+ filename, range_size);
+
+ if (range_size != range.size())
+ fatal("Memory range size has changed! Saw %lld, expected %lld\n",
+ range_size, range.size());
+
+ uint64_t curr_size = 0;
+ long* temp_page = new long[chunk_size];
+ long* pmem_current;
+ uint32_t bytes_read;
+ while (curr_size < range.size()) {
+ bytes_read = gzread(compressed_mem, temp_page, chunk_size);
+ if (bytes_read == 0)
+ break;
+
+ assert(bytes_read % sizeof(long) == 0);
+
+ for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
+ // Only copy bytes that are non-zero, so we don't give
+ // the VM system hell
+ if (*(temp_page + x) != 0) {
+ pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
+ *pmem_current = *(temp_page + x);
+ }
}
- curSize += bytesRead;
+ curr_size += bytes_read;
}
- free(tempPage);
+ delete[] temp_page;
- if (gzclose(compressedMem))
+ if (gzclose(compressed_mem))
fatal("Close failed on physical memory checkpoint file '%s'\n",
filename);
-
-}
-
-PhysicalMemory *
-PhysicalMemoryParams::create()
-{
- return new PhysicalMemory(this);
}