/*
 * Copyright (c) 2012, 2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */
#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
/**
 * On Linux, MAP_NORESERVE allows us to simulate a very large memory
 * without committing to actually providing the swap space on the
 * host. On FreeBSD or OSX the MAP_NORESERVE flag does not exist,
 * so simply make it 0.
 */
#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

using namespace std;
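// With MAP_NORESERVE the host kernel sets up page tables lazily and
// does not reserve swap, so a mapping far larger than the host's
// physical memory (e.g. a simulated 1 TiB DRAM) can still succeed.
// Pages only consume host memory once the simulated workload touches
// them; if the host later runs out, the access faults, hence the
// SIGSEGV warning issued in the constructor below.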
PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    _name(_name), rangeCache(addrMap.end()), size(0),
    mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");
    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but should not be
            // part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently; note that memories of this
            // kind are allowed to overlap in the logical address map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }
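    // Interleaved ranges are gathered below and merged before a
    // backing store is created, so sibling channels end up sharing one
    // contiguous host allocation. For example, two memories
    // interleaved over [0, 1 GiB) are backed by a single 1 GiB mmap
    // rather than two fragmented half-size ones (the channel count and
    // range here are illustrative, not from any particular
    // configuration).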
    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }
    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}
void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories,
                                   bool conf_table_reported,
                                   bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately when we are done
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}
PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}
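// The rangeCache member acts as a single-entry cache of the most
// recently matched address range: isMemAddr() updates it on a tree
// hit, and access()/functionalAccess() consult it before falling back
// to a full interval-tree lookup. Since consecutive packets typically
// target the same range, this avoids most tree lookups.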
bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        return true;
    } else {
        // lookup in the interval tree
        const auto& r = addrMap.contains(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r;
        return true;
    }
}
AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is
    // unlikely to be called more than once the iteration should not
    // be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}
void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->access(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling access
        const auto& m = addrMap.contains(addr);
        assert(m != addrMap.end());
        m->second->access(pkt);
    }
}
void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->functionalAccess(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling functionalAccess
        const auto& m = addrMap.contains(addr);
        assert(m != addrMap.end());
        m->second->functionalAccess(pkt);
    }
}
void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}
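// Each backing store ends up in its own checkpoint section named
// "store<N>", holding the scalars store_id, filename and range_size,
// while the raw contents go to a separate gzip-compressed
// "<name>.store<N>.pmem" file in the checkpoint directory. Roughly:
//
//   [...store0]
//   store_id=0
//   filename=system.physmem.store0.pmem
//   range_size=1073741824
//
// (section and file names above are illustrative; they depend on the
// object name and store count of the actual configuration)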
void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write the compressed memory file
    string filepath = CheckpointIn::dir() + "/" + filename;
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.contains(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}
void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    // restore the filename and build the full path in the checkpoint
    // directory
    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}