/*
 * Copyright (c) 2012, 2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"
/**
 * On Linux, MAP_NORESERVE allows us to simulate a very large memory
 * without committing to actually providing the swap space on the
 * host. On OSX the MAP_NORESERVE flag does not exist, so simply make
 * it 0.
 */
#if defined(__APPLE__)
#define MAP_NORESERVE 0
#endif

using namespace std;
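// Added illustration: with MAP_NORESERVE, pages of a large anonymous
// mapping are only backed by RAM or swap when first written, which is
// what lets gem5 map a simulated memory far bigger than the host can
// commit to. A minimal standalone sketch, assuming a Linux host
// (sizes are hypothetical):
//
//     size_t sz = 1ULL << 40;  // 1 TiB of simulated memory
//     uint8_t* p = (uint8_t*) mmap(NULL, sz, PROT_READ | PROT_WRITE,
//                                  MAP_ANON | MAP_PRIVATE | MAP_NORESERVE,
//                                  -1, 0);
//     // The call succeeds without 1 TiB of swap; a SIGSEGV can only
//     // occur later, on the first touch of a page the host cannot
//     // back (hence the warn() in the constructor below).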
PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    _name(_name), rangeCache(addrMap.end()), size(0),
    mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");
    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby, and it also needs a backing store, but should
            // not be part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently, also note that memories of this
            // kind are allowed to overlap in the logical address map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems);
        }
    }
    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);
                    createBackingStore(merged_range, curr_memories);
                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);
        createBackingStore(merged_range, curr_memories);
    }
}
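// Added example: four channel controllers interleaved on, say, bits
// 7:6 each expose an AddrRange covering the same [start, end) span
// but matching a different value of those bits. The loop above
// collects them, and the AddrRange list constructor (used in the
// merge step) folds them into one contiguous range backed by a single
// shared store. The constructor arguments below are hypothetical and
// follow the pattern start, end, interleaving high bit, number of
// interleaving bits, match value:
//
//     vector<AddrRange> parts = {
//         AddrRange(0x0, 0x80000000, 7, 2, 0),  // channel 0
//         AddrRange(0x0, 0x80000000, 7, 2, 1),  // channel 1
//         AddrRange(0x0, 0x80000000, 7, 2, 2),  // channel 2
//         AddrRange(0x0, 0x80000000, 7, 2, 3)   // channel 3
//     };
//     AddrRange merged(parts);  // one contiguous [0, 2 GiB) range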
void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap memory failed");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately when we are done
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}
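// Added note: each AbstractMemory later services an access at address
// A by indexing into this shared segment, conceptually
//
//     uint8_t* host_addr = pmem + (A - range.start());
//
// which is why interleaved controllers can share one segment: their
// ranges select disjoint addresses within the same merged range.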
PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.second, s.first.size());
}
bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        return true;
    } else {
        // lookup in the interval tree
        const auto& r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r;
        return true;
    }
}
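// Added usage sketch: the single-entry range cache makes the common
// case cheap, since callers typically probe an address and then access
// it through the same object:
//
//     if (physmem.isMemAddr(pkt->getAddr()))  // may update rangeCache
//         physmem.access(pkt);                // then hits rangeCache
//
// (physmem and pkt are hypothetical names for a PhysicalMemory
// instance and an in-flight packet.)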
AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}
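// Added example of the effect: with four interleaved 512 MiB channel
// ranges covering [0, 2 GiB), plus a reference memory that is not
// config-reported, the caller sees a single merged entry (values are
// hypothetical):
//
//     AddrRangeList ranges = physmem.getConfAddrRanges();
//     // ranges == { [0x0 : 0x80000000) }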
void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->access(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling access
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->access(pkt);
    }
}
void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->functionalAccess(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling functionalAccess
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->functionalAccess(pkt);
    }
}
void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);
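    // Added note: LockedAddr entries track outstanding load-linked /
    // store-conditional reservations, so saving the (address, context
    // id) pairs keeps an in-flight LL/SC sequence valid across a
    // checkpoint restore.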
    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.first, s.second);
    }
}
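// Added sketch of the resulting checkpoint layout: each backing store
// gets its own section named store0, store1, ..., so the INI-style
// checkpoint file ends up with entries along the lines of (section
// path and values are hypothetical):
//
//     [system.physmem.store0]
//     store_id=0
//     filename=system.physmem.store0.pmem
//     range_size=536870912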
void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write the compressed memory file
    string filepath = CheckpointIn::dir() + "/" + filename;
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}
void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    // where did we store the memory segment?
    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}