void
KvmVM::delayedStartup()
{
- const std::vector<std::pair<AddrRange, uint8_t*> >&memories(
+ const std::vector<BackingStoreEntry> &memories(
system->getPhysMem().getBackingStore());
DPRINTF(Kvm, "Mapping %i memory region(s)\n", memories.size());
for (int slot(0); slot < memories.size(); ++slot) {
- const AddrRange &range(memories[slot].first);
- void *pmem(memories[slot].second);
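+ // Skip memories that should not be mapped into the guest, e.g. shadow
+ // memories that overlap other memory in the address map.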
+ if (!memories[slot].kvmMap) {
+ DPRINTF(Kvm, "Skipping region marked as not usable by KVM\n");
+ continue;
+ }
+
+ const AddrRange &range(memories[slot].range);
+ void *pmem(memories[slot].pmem);
if (pmem) {
DPRINTF(Kvm, "Mapping region: 0x%p -> 0x%llx [size: 0x%llx]\n",
# e.g. by the testers that use shadow memories as a reference
in_addr_map = Param.Bool(True, "Memory part of the global address map")
+ # When KVM acceleration is used, memory is mapped into the guest process
+ # address space and accessed directly. Some memories may need to be
+ # excluded from this mapping if they overlap with other memory ranges or
+ # are not accessible by the CPU.
+ kvm_map = Param.Bool(True, "Should KVM map this memory for the guest")
+
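A minimal configuration sketch (illustrative, not part of this change) of how a shadow/reference memory could opt out of the KVM mapping; the object name shadow_mem is hypothetical, and SimpleMemory stands in for any AbstractMemory subclass:

# A reference copy used only for checking; it must not be mapped into the
# KVM guest because the primary memory already covers this address range.
shadow_mem = SimpleMemory(range=AddrRange('512MB'))
shadow_mem.in_addr_map = False
shadow_mem.kvm_map = False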
# Should the bootloader include this memory when passing
# configuration information about the physical memory layout to
# the kernel, e.g. using ATAG or ACPI
AbstractMemory::AbstractMemory(const Params *p) :
MemObject(p), range(params()->range), pmemAddr(NULL),
confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
- _system(NULL)
+ kvmMap(p->kvm_map), _system(NULL)
{
}
uint8_t* pmemAddr;
// Enable specific memories to be reported to the configuration table
- bool confTableReported;
+ const bool confTableReported;
// Should the memory appear in the global address map
- bool inAddrMap;
+ const bool inAddrMap;
+
+ // Should KVM map this memory for the guest
+ const bool kvmMap;
std::list<LockedAddr> lockedAddrList;
*/
bool isInAddrMap() const { return inAddrMap; }
+ /**
+ * When shadow memories are in use, KVM may want to map one or the other,
+ * but cannot map both into the guest address space.
+ *
+ * @return if this memory should be mapped into the KVM guest address space
+ */
+ bool isKvmMap() const { return kvmMap; }
+
/**
* Perform an untimed memory access and update all the state
* (e.g. locked addresses) and statistics accordingly. The packet
// memories are allowed to overlap in the logic address
// map
vector<AbstractMemory*> unmapped_mems{m};
- createBackingStore(m->getAddrRange(), unmapped_mems);
+ createBackingStore(m->getAddrRange(), unmapped_mems,
+ m->isConfReported(), m->isInAddrMap(),
+ m->isKvmMap());
}
}
if (!intlv_ranges.empty() &&
!intlv_ranges.back().mergesWith(r.first)) {
AddrRange merged_range(intlv_ranges);
- createBackingStore(merged_range, curr_memories);
+
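+ // All memories in a merged interleaved range are backed by a single
+ // store entry, so they must agree on how that store is flagged.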
+ AbstractMemory *f = curr_memories.front();
+ for (const auto& c : curr_memories)
+ if (f->isConfReported() != c->isConfReported() ||
+ f->isInAddrMap() != c->isInAddrMap() ||
+ f->isKvmMap() != c->isKvmMap())
+ fatal("Inconsistent flags in an interleaved "
+ "range\n");
+
+ createBackingStore(merged_range, curr_memories,
+ f->isConfReported(), f->isInAddrMap(),
+ f->isKvmMap());
+
intlv_ranges.clear();
curr_memories.clear();
}
curr_memories.push_back(r.second);
} else {
vector<AbstractMemory*> single_memory{r.second};
- createBackingStore(r.first, single_memory);
+ createBackingStore(r.first, single_memory,
+ r.second->isConfReported(),
+ r.second->isInAddrMap(),
+ r.second->isKvmMap());
}
}
}
// ahead and do it
if (!intlv_ranges.empty()) {
AddrRange merged_range(intlv_ranges);
- createBackingStore(merged_range, curr_memories);
+
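+ // As above, the flags must be consistent across the merged range.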
+ AbstractMemory *f = curr_memories.front();
+ for (const auto& c : curr_memories)
+ if (f->isConfReported() != c->isConfReported() ||
+ f->isInAddrMap() != c->isInAddrMap() ||
+ f->isKvmMap() != c->isKvmMap())
+ fatal("Inconsistent flags in an interleaved "
+ "range\n");
+
+ createBackingStore(merged_range, curr_memories,
+ f->isConfReported(), f->isInAddrMap(),
+ f->isKvmMap());
}
}
void
PhysicalMemory::createBackingStore(AddrRange range,
- const vector<AbstractMemory*>& _memories)
+ const vector<AbstractMemory*>& _memories,
+ bool conf_table_reported,
+ bool in_addr_map, bool kvm_map)
{
panic_if(range.interleaved(),
"Cannot create backing store for interleaved range %s\n",
// remember this backing store so we can checkpoint it and unmap
// it appropriately
- backingStore.push_back(make_pair(range, pmem));
+ backingStore.emplace_back(range, pmem,
+ conf_table_reported, in_addr_map, kvm_map);
// point the memories to their backing store
for (const auto& m : _memories) {
{
// unmap the backing store
for (auto& s : backingStore)
- munmap((char*)s.second, s.first.size());
+ munmap((char*)s.pmem, s.range.size());
}
bool
// store each backing store memory segment in a file
for (auto& s : backingStore) {
ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
- serializeStore(cp, store_id++, s.first, s.second);
+ serializeStore(cp, store_id++, s.range, s.pmem);
}
}
fatal("Can't open physical memory checkpoint file '%s'", filename);
// we've already got the actual backing store mapped
- uint8_t* pmem = backingStore[store_id].second;
- AddrRange range = backingStore[store_id].first;
+ uint8_t* pmem = backingStore[store_id].pmem;
+ AddrRange range = backingStore[store_id].range;
long range_size;
UNSERIALIZE_SCALAR(range_size);
*/
class AbstractMemory;
+/**
+ * A single entry for the backing store.
+ */
+class BackingStoreEntry
+{
+ public:
+
+ /**
+ * Create a backing store entry. The pointer is not owned by this entry;
+ * PhysicalMemory is responsible for managing the underlying memory.
+ */
+ BackingStoreEntry(AddrRange range, uint8_t* pmem,
+ bool conf_table_reported, bool in_addr_map, bool kvm_map)
+ : range(range), pmem(pmem), confTableReported(conf_table_reported),
+ inAddrMap(in_addr_map), kvmMap(kvm_map)
+ {}
+
+ /**
+ * The address range covered in the guest.
+ */
+ AddrRange range;
+
+ /**
+ * Pointer to the host memory this range maps to. This memory is the same
+ * size as the range field.
+ */
+ uint8_t* pmem;
+
+ /**
+ * Whether this memory should be reported to the configuration table
+ */
+ bool confTableReported;
+
+ /**
+ * Whether this memory should appear in the global address map
+ */
+ bool inAddrMap;
+
+ /**
+ * Whether KVM should map this memory into the guest address space during
+ * acceleration.
+ */
+ bool kvmMap;
+};
+
/**
* The physical memory encapsulates all memories in the system and
* provides basic functionality for accessing those memories without
// The physical memory used to provide the memory in the simulated
// system
- std::vector<std::pair<AddrRange, uint8_t*>> backingStore;
+ std::vector<BackingStoreEntry> backingStore;
// Prevent copying
PhysicalMemory(const PhysicalMemory&);
*
* @param range The address range covered
* @param memories The memories this range maps to
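+ * @param conf_table_reported Should the memory be part of the
+ * configuration table
+ * @param in_addr_map Should the memory appear in the global address map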
+ * @param kvm_map Should KVM map this memory for the guest
*/
void createBackingStore(AddrRange range,
- const std::vector<AbstractMemory*>& _memories);
+ const std::vector<AbstractMemory*>& _memories,
+ bool conf_table_reported,
+ bool in_addr_map, bool kvm_map);
public:
*
* @return Pointers to the memory backing store
*/
- std::vector<std::pair<AddrRange, uint8_t*>> getBackingStore() const
+ std::vector<BackingStoreEntry> getBackingStore() const
{ return backingStore; }
/**