/*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012, 2014, 2018 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* Authors: Andreas Hansson
*/
-#include "debug/BusAddrRanges.hh"
#include "mem/physical.hh"
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/user.h>
+#include <unistd.h>
+#include <zlib.h>
+
+#include <cerrno>
+#include <climits>
+#include <cstdio>
+#include <iostream>
+#include <string>
+
+#include "base/trace.hh"
+#include "debug/AddrRanges.hh"
+#include "debug/Checkpoint.hh"
+#include "mem/abstract_mem.hh"
+
+/**
+ * On Linux, MAP_NORESERVE allows us to simulate a very large memory
+ * without committing to actually providing the swap space on the
+ * host. On FreeBSD or OSX the MAP_NORESERVE flag does not exist,
+ * so simply make it 0.
+ */
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#ifndef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+#endif
+
using namespace std;
-PhysicalMemory::PhysicalMemory(const vector<AbstractMemory*>& _memories) :
- size(0)
+PhysicalMemory::PhysicalMemory(const string& _name,
+ const vector<AbstractMemory*>& _memories,
+ bool mmap_using_noreserve) :
+ _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve)
{
- for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
- m != _memories.end(); ++m) {
+ if (mmap_using_noreserve)
+ warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");
+
+ // add the memories from the system to the address map as
+ // appropriate
+ for (const auto& m : _memories) {
// only add the memory if it is part of the global address map
- if ((*m)->isInAddrMap()) {
- memories.push_back(*m);
+ if (m->isInAddrMap()) {
+ memories.push_back(m);
// calculate the total size once and for all
- size += (*m)->size();
+ size += m->size();
// add the range to our interval tree and make sure it does not
// intersect an existing range
- if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
- fatal("Memory address range for %s is overlapping\n",
- (*m)->name());
+ fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
+ "Memory address range for %s is overlapping\n",
+ m->name());
+ } else {
+ // this type of memory is used e.g. as reference memory by
+ // Ruby, and it also needs a backing store, but it should
+ // not be part of the global address map
+ DPRINTF(AddrRanges,
+ "Skipping memory %s that is not in global address map\n",
+ m->name());
+
+ // sanity check
+ fatal_if(m->getAddrRange().interleaved(),
+ "Memory %s that is not in the global address map cannot "
+ "be interleaved\n", m->name());
+
+ // simply do it independently, also note that this kind
+ // of memory is allowed to overlap in the logical address
+ // map
+ vector<AbstractMemory*> unmapped_mems{m};
+ createBackingStore(m->getAddrRange(), unmapped_mems,
+ m->isConfReported(), m->isInAddrMap(),
+ m->isKvmMap());
}
- DPRINTF(BusAddrRanges,
- "Skipping memory %s that is not in global address map\n",
- (*m)->name());
}
- rangeCache.invalidate();
+
+ // iterate over the ranges in increasing address order, create
+ // backing store for each chunk of contiguous space and inform
+ // the memories that use it
+ vector<AddrRange> intlv_ranges;
+ vector<AbstractMemory*> curr_memories;
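+ // interleaved ranges are first collected and merged into one
+ // contiguous range, so that a single backing store can be shared
+ // by all the memories that make up the interleave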
+ for (const auto& r : addrMap) {
+ // simply skip past all memories that are null and hence do
+ // not need any backing store
+ if (!r.second->isNull()) {
+ // if the range is interleaved then save it for now
+ if (r.first.interleaved()) {
+ // if we have already collected interleaved ranges
+ // that are not part of the same range, then merge
+ // them before adding the new one
+ if (!intlv_ranges.empty() &&
+ !intlv_ranges.back().mergesWith(r.first)) {
+ AddrRange merged_range(intlv_ranges);
+
+ AbstractMemory *f = curr_memories.front();
+ for (const auto& c : curr_memories)
+ if (f->isConfReported() != c->isConfReported() ||
+ f->isInAddrMap() != c->isInAddrMap() ||
+ f->isKvmMap() != c->isKvmMap())
+ fatal("Inconsistent flags in an interleaved "
+ "range\n");
+
+ createBackingStore(merged_range, curr_memories,
+ f->isConfReported(), f->isInAddrMap(),
+ f->isKvmMap());
+
+ intlv_ranges.clear();
+ curr_memories.clear();
+ }
+ intlv_ranges.push_back(r.first);
+ curr_memories.push_back(r.second);
+ } else {
+ vector<AbstractMemory*> single_memory{r.second};
+ createBackingStore(r.first, single_memory,
+ r.second->isConfReported(),
+ r.second->isInAddrMap(),
+ r.second->isKvmMap());
+ }
+ }
+ }
+
+ // if there are still interleaved ranges waiting to be merged, go
+ // ahead and do it
+ if (!intlv_ranges.empty()) {
+ AddrRange merged_range(intlv_ranges);
+
+ AbstractMemory *f = curr_memories.front();
+ for (const auto& c : curr_memories)
+ if (f->isConfReported() != c->isConfReported() ||
+ f->isInAddrMap() != c->isInAddrMap() ||
+ f->isKvmMap() != c->isKvmMap())
+ fatal("Inconsistent flags in an interleaved "
+ "range\n");
+
+ createBackingStore(merged_range, curr_memories,
+ f->isConfReported(), f->isInAddrMap(),
+ f->isKvmMap());
+ }
}
-bool
-PhysicalMemory::isMemAddr(Addr addr) const
+void
+PhysicalMemory::createBackingStore(AddrRange range,
+ const vector<AbstractMemory*>& _memories,
+ bool conf_table_reported,
+ bool in_addr_map, bool kvm_map)
{
- // see if the address is within the last matched range
- if (addr != rangeCache) {
- // lookup in the interval tree
- range_map<Addr, AbstractMemory*>::const_iterator r =
- addrMap.find(addr);
- if (r == addrMap.end()) {
- // not in the cache, and not in the tree
- return false;
- }
- // the range is in the tree, update the cache
- rangeCache = r->first;
+ panic_if(range.interleaved(),
+ "Cannot create backing store for interleaved range %s\n",
+ range.to_string());
+
+ // perform the actual mmap
+ DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
+ range.to_string(), range.size());
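+ // the backing store is not file backed and is private to the
+ // simulator process, hence an anonymous and private mapping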
+ int map_flags = MAP_ANON | MAP_PRIVATE;
+
+ // to be able to simulate very large memories, the user can opt to
+ // pass noreserve to mmap
+ if (mmapUsingNoReserve) {
+ map_flags |= MAP_NORESERVE;
+ }
+
+ uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
+ PROT_READ | PROT_WRITE,
+ map_flags, -1, 0);
+
+ if (pmem == (uint8_t*) MAP_FAILED) {
+ perror("mmap");
+ fatal("Could not mmap %d bytes for range %s!\n", range.size(),
+ range.to_string());
}
- assert(addrMap.find(addr) != addrMap.end());
+ // remember this backing store so we can checkpoint it and unmap
+ // it appropriately
+ backingStore.emplace_back(range, pmem,
+ conf_table_reported, in_addr_map, kvm_map);
- // either matched the cache or found in the tree
- return true;
+ // point the memories to their backing store
+ for (const auto& m : _memories) {
+ DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
+ m->name());
+ m->setBackingStore(pmem);
+ }
+}
+
+PhysicalMemory::~PhysicalMemory()
+{
+ // unmap the backing store
+ for (auto& s : backingStore)
+ munmap((char*)s.pmem, s.range.size());
+}
+
+bool
+PhysicalMemory::isMemAddr(Addr addr) const
+{
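+ // contains() looks the address up in the interval tree and
+ // returns an iterator, so comparing against end() tells us
+ // whether any memory range covers the address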
+ return addrMap.contains(addr) != addrMap.end();
}
AddrRangeList
// this could be done once in the constructor, but since it is unlikely to
// be called more than once the iteration should not be a problem
AddrRangeList ranges;
- for (vector<AbstractMemory*>::const_iterator m = memories.begin();
- m != memories.end(); ++m) {
- if ((*m)->isConfReported()) {
- ranges.push_back((*m)->getAddrRange());
+ vector<AddrRange> intlv_ranges;
+ for (const auto& r : addrMap) {
+ if (r.second->isConfReported()) {
+ // if the range is interleaved then save it for now
+ if (r.first.interleaved()) {
+ // if we have already collected interleaved ranges
+ // that are not part of the same range, then merge
+ // them before adding the new one
+ if (!intlv_ranges.empty() &&
+ !intlv_ranges.back().mergesWith(r.first)) {
+ ranges.push_back(AddrRange(intlv_ranges));
+ intlv_ranges.clear();
+ }
+ intlv_ranges.push_back(r.first);
+ } else {
+ // keep the current range
+ ranges.push_back(r.first);
+ }
}
}
+ // if there are still interleaved ranges waiting to be merged,
+ // go ahead and do it
+ if (!intlv_ranges.empty()) {
+ ranges.push_back(AddrRange(intlv_ranges));
+ }
+
return ranges;
}
PhysicalMemory::access(PacketPtr pkt)
{
assert(pkt->isRequest());
- Addr addr = pkt->getAddr();
- range_map<Addr, AbstractMemory*>::const_iterator m = addrMap.find(addr);
+ const auto& m = addrMap.contains(pkt->getAddrRange());
assert(m != addrMap.end());
m->second->access(pkt);
}
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
assert(pkt->isRequest());
- Addr addr = pkt->getAddr();
- range_map<Addr, AbstractMemory*>::const_iterator m = addrMap.find(addr);
+ const auto& m = addrMap.contains(pkt->getAddrRange());
assert(m != addrMap.end());
m->second->functionalAccess(pkt);
}
+
+void
+PhysicalMemory::serialize(CheckpointOut &cp) const
+{
+ // serialize all the locked addresses and their context ids
+ vector<Addr> lal_addr;
+ vector<ContextID> lal_cid;
+
+ for (auto& m : memories) {
+ const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
+ for (const auto& l : locked_addrs) {
+ lal_addr.push_back(l.addr);
+ lal_cid.push_back(l.contextId);
+ }
+ }
+
+ SERIALIZE_CONTAINER(lal_addr);
+ SERIALIZE_CONTAINER(lal_cid);
+
+ // serialize the backing stores
+ unsigned int nbr_of_stores = backingStore.size();
+ SERIALIZE_SCALAR(nbr_of_stores);
+
+ unsigned int store_id = 0;
+ // store each backing store memory segment in a file
+ for (auto& s : backingStore) {
+ ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
+ serializeStore(cp, store_id++, s.range, s.pmem);
+ }
+}
+
+void
+PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
+ AddrRange range, uint8_t* pmem) const
+{
+ // we cannot use the address range for the name as the
+ // memories that are not part of the address map can overlap
+ string filename = name() + ".store" + to_string(store_id) + ".pmem";
+ long range_size = range.size();
+
+ DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
+ filename, range_size);
+
+ SERIALIZE_SCALAR(store_id);
+ SERIALIZE_SCALAR(filename);
+ SERIALIZE_SCALAR(range_size);
+
+ // write memory file
+ string filepath = CheckpointIn::dir() + "/" + filename;
+ gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
+ if (compressed_mem == NULL)
+ fatal("Can't open physical memory checkpoint file '%s'\n",
+ filename);
+
+ uint64_t pass_size = 0;
+
+ // gzwrite fails if (int)len < 0 (gzwrite returns int)
+ for (uint64_t written = 0; written < range.size();
+ written += pass_size) {
+ pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
+ (uint64_t)INT_MAX : (range.size() - written);
+
+ if (gzwrite(compressed_mem, pmem + written,
+ (unsigned int) pass_size) != (int) pass_size) {
+ fatal("Write failed on physical memory checkpoint file '%s'\n",
+ filename);
+ }
+ }
+
+ // close the compressed stream and check that the exit status
+ // is zero
+ if (gzclose(compressed_mem))
+ fatal("Close failed on physical memory checkpoint file '%s'\n",
+ filename);
+
+}
+
+void
+PhysicalMemory::unserialize(CheckpointIn &cp)
+{
+ // unserialize the locked addresses and map them to the
+ // appropriate memory controller
+ vector<Addr> lal_addr;
+ vector<ContextID> lal_cid;
+ UNSERIALIZE_CONTAINER(lal_addr);
+ UNSERIALIZE_CONTAINER(lal_cid);
+ for (size_t i = 0; i < lal_addr.size(); ++i) {
+ const auto& m = addrMap.contains(lal_addr[i]);
+ m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
+ }
+
+ // unserialize the backing stores
+ unsigned int nbr_of_stores;
+ UNSERIALIZE_SCALAR(nbr_of_stores);
+
+ for (unsigned int i = 0; i < nbr_of_stores; ++i) {
+ ScopedCheckpointSection sec(cp, csprintf("store%d", i));
+ unserializeStore(cp);
+ }
+
+}
+
+void
+PhysicalMemory::unserializeStore(CheckpointIn &cp)
+{
+ const uint32_t chunk_size = 16384;
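+ // number of bytes to read from the compressed stream in each
+ // gzread pass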
+
+ unsigned int store_id;
+ UNSERIALIZE_SCALAR(store_id);
+
+ string filename;
+ UNSERIALIZE_SCALAR(filename);
+ string filepath = cp.cptDir + "/" + filename;
+
+ // open the compressed checkpoint file for this memory segment
+ gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
+ if (compressed_mem == NULL)
+ fatal("Can't open physical memory checkpoint file '%s'", filename);
+
+ // we've already got the actual backing store mapped
+ uint8_t* pmem = backingStore[store_id].pmem;
+ AddrRange range = backingStore[store_id].range;
+
+ long range_size;
+ UNSERIALIZE_SCALAR(range_size);
+
+ DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
+ filename, range_size);
+
+ if (range_size != range.size())
+ fatal("Memory range size has changed! Saw %lld, expected %lld\n",
+ range_size, range.size());
+
+ uint64_t curr_size = 0;
+ long* temp_page = new long[chunk_size];
+ long* pmem_current;
+ uint32_t bytes_read;
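+ // read the compressed image chunk by chunk and copy it into the
+ // already mmapped backing store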
+ while (curr_size < range.size()) {
+ bytes_read = gzread(compressed_mem, temp_page, chunk_size);
+ if (bytes_read == 0)
+ break;
+
+ assert(bytes_read % sizeof(long) == 0);
+
+ for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
+ // only copy words that are non-zero, so we do not
+ // needlessly touch (and force the host to allocate)
+ // pages that would otherwise remain all-zero
+ if (*(temp_page + x) != 0) {
+ pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
+ *pmem_current = *(temp_page + x);
+ }
+ }
+ curr_size += bytes_read;
+ }
+
+ delete[] temp_page;
+
+ if (gzclose(compressed_mem))
+ fatal("Close failed on physical memory checkpoint file '%s'\n",
+ filename);
+}