}
}
- // iterate over the increasing addresses and create as large
- // chunks as possible of contigous space to be mapped to backing
- // store, also remember what memories constitute the range so we
- // can go and find out if we have to init their parts to zero
- AddrRange curr_range;
+ // iterate over the increasing addresses and chunks of contiguous
+ // space to be mapped to backing store, also remember what
+ // memories constitute the range so we can go and find out if we
+ // have to init their parts to zero
vector<AbstractMemory*> curr_memories;
for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
r != addrMap.end(); ++r) {
// simply skip past all memories that are null and hence do
// not need any backing store
if (!r->second->isNull()) {
- // if the current range is valid, decide if we split or
- // not
- if (curr_range.valid()) {
- // if the ranges are neighbours, then append, this
- // will eventually be extended to include support for
- // address striping and merge the interleaved ranges
- if (curr_range.end + 1 == r->first.start) {
- DPRINTF(BusAddrRanges,
- "Merging neighbouring ranges %x:%x and %x:%x\n",
- curr_range.start, curr_range.end, r->first.start,
- r->first.end);
- // update the end of the range and add the current
- // memory to the list of memories
- curr_range.end = r->first.end;
- curr_memories.push_back(r->second);
- } else {
- // what we already have is valid, and this is not
- // contigious, so create the backing store and
- // then start over
- createBackingStore(curr_range, curr_memories);
-
- // remember the current range and reset the current
- // set of memories to contain this one
- curr_range = r->first;
- curr_memories.clear();
- curr_memories.push_back(r->second);
- }
- } else {
- // we haven't seen any valid ranges yet, so remember
- // the current range and reset the current set of
- // memories to contain this one
- curr_range = r->first;
- curr_memories.clear();
- curr_memories.push_back(r->second);
- }
+ // this will eventually be extended to support merging of
+ // interleaved address ranges, and although it might seem
+ // overly complicated at this point it will all be used
+ curr_memories.push_back(r->second);
+ createBackingStore(r->first, curr_memories);
+ curr_memories.clear();
}
}
-
- // if we have a valid range upon finishing the iteration, then
- // create the backing store
- if (curr_range.valid())
- createBackingStore(curr_range, curr_memories);
}
void
PhysicalMemory::createBackingStore(AddrRange range,
const vector<AbstractMemory*>& _memories)
{
+ if (range.interleaved())
+ panic("Cannot create backing store for interleaved range %s\n",
+ range.to_string());
+
// perform the actual mmap
- DPRINTF(BusAddrRanges, "Creating backing store for range %x:%x\n",
- range.start, range.end);
+ DPRINTF(BusAddrRanges, "Creating backing store for range %s with size %d\n",
+ range.to_string(), range.size());
int map_flags = MAP_ANON | MAP_PRIVATE;
uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
PROT_READ | PROT_WRITE,
if (pmem == (uint8_t*) MAP_FAILED) {
perror("mmap");
- fatal("Could not mmap %d bytes for range %x:%x!\n", range.size(),
- range.start, range.end);
+ fatal("Could not mmap %d bytes for range %s!\n", range.size(),
+ range.to_string());
}
// remember this backing store so we can checkpoint it and unmap
// it appropriately
backingStore.push_back(make_pair(range, pmem));
+ // count how many of the memories are to be zero initialized so we
+ // can see if some but not all have this parameter set
+ uint32_t init_to_zero = 0;
+
// point the memories to their backing store, and if requested,
// initialize the memory range to 0
for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
(*m)->setBackingStore(pmem);
// if it should be zero, then go and make it so
- if ((*m)->initToZero())
- memset(pmem, 0, (*m)->size());
+ if ((*m)->initToZero()) {
+ ++init_to_zero;
+ }
+ }
+
+ if (init_to_zero != 0) {
+ if (init_to_zero != _memories.size())
+ fatal("Some, but not all memories in range %s are set zero\n",
+ range.to_string());
- // advance the pointer for the next memory in line
- pmem += (*m)->size();
+ memset(pmem, 0, range.size());
}
}
PhysicalMemory::isMemAddr(Addr addr) const
{
// see if the address is within the last matched range
- if (addr != rangeCache) {
+ if (!rangeCache.contains(addr)) {
// lookup in the interval tree
AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.find(addr);
if (r == addrMap.end()) {
// this could be done once in the constructor, but since it is unlikely to
// be called more than once the iteration should not be a problem
AddrRangeList ranges;
- for (vector<AbstractMemory*>::const_iterator m = memories.begin();
- m != memories.end(); ++m) {
- if ((*m)->isConfReported()) {
- ranges.push_back((*m)->getAddrRange());
+ vector<AddrRange> intlv_ranges;
+ for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
+ r != addrMap.end(); ++r) {
+ if (r->second->isConfReported()) {
+ // if the range is interleaved then save it for now
+ if (r->first.interleaved()) {
+ // if we already got interleaved ranges that are not
+ // part of the same range, then first do a merge
+ // before we add the new one
+ if (!intlv_ranges.empty() &&
+ !intlv_ranges.back().mergesWith(r->first)) {
+ ranges.push_back(AddrRange(intlv_ranges));
+ intlv_ranges.clear();
+ }
+ intlv_ranges.push_back(r->first);
+ } else {
+ // keep the current range
+ ranges.push_back(r->first);
+ }
}
}
+ // if there are still interleaved ranges waiting to be merged,
+ // go ahead and do it
+ if (!intlv_ranges.empty()) {
+ ranges.push_back(AddrRange(intlv_ranges));
+ }
+
return ranges;
}
arrayParamIn(cp, section, "lal_addr", lal_addr);
arrayParamIn(cp, section, "lal_cid", lal_cid);
for(size_t i = 0; i < lal_addr.size(); ++i) {
- AddrRangeMap<AbstractMemory*>::iterator m = addrMap.find(lal_addr[i]);
+ AddrRangeMap<AbstractMemory*>::const_iterator m =
+ addrMap.find(lal_addr[i]);
m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
}