gem5.git: src/mem/physical.cc
/*
 * Copyright (c) 2012, 2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "sim/serialize.hh"

/**
 * On Linux, MAP_NORESERVE allows us to simulate a very large memory
 * without committing to actually providing the swap space on the
 * host. On FreeBSD or OSX the MAP_NORESERVE flag does not exist, so
 * simply define it as 0 there, which makes it a no-op when OR'd into
 * the mmap flags.
 */
#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

PhysicalMemory::PhysicalMemory(const std::string& _name,
                               const std::vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve,
                               const std::string& shared_backstore) :
    _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve),
    sharedBackstore(shared_backstore)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used, e.g., as reference memory by
            // Ruby; it still needs a backing store, but should not be
            // part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // create its backing store independently; note that memories
            // of this kind are allowed to overlap in the logical address
            // map
            std::vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }

    // iterate over the address map in increasing address order, create a
    // backing store for each chunk of contiguous space, and inform the
    // corresponding memories
    std::vector<AddrRange> intlv_ranges;
    std::vector<AbstractMemory*> curr_memories;
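    // interleaved ranges are collected until a range that no longer
    // merges with them is seen; at that point they are combined into a
    // single contiguous range that is given one common backing store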
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if the interleaved ranges collected so far do not
                // merge with this one, combine them first before
                // starting on the new range
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                std::vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}

void
PhysicalMemory::createBackingStore(
    AddrRange range, const std::vector<AbstractMemory*>& _memories,
    bool conf_table_reported, bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());

    int shm_fd;
    int map_flags;

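    // by default the backing store is an anonymous, private mapping that
    // is local to this gem5 process; if a shared backstore name is given,
    // back the memory with a POSIX shared memory object instead so that
    // other processes can map the same physical memory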
    if (sharedBackstore.empty()) {
        shm_fd = -1;
        map_flags = MAP_ANON | MAP_PRIVATE;
    } else {
        DPRINTF(AddrRanges, "Sharing backing store as %s\n",
                sharedBackstore.c_str());
        shm_fd = shm_open(sharedBackstore.c_str(), O_CREAT | O_RDWR, 0666);
        if (shm_fd == -1)
            panic("Shared memory failed");
        if (ftruncate(shm_fd, range.size()))
            panic("Setting size of shared memory failed");
        map_flags = MAP_SHARED;
    }

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

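    // map the entire range in one contiguous host mapping; shm_fd is -1
    // (and ignored) in the anonymous, private case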
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, shm_fd, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    return addrMap.contains(addr) != addrMap.end();
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely
    // to be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    std::vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if the interleaved ranges collected so far do not
                // merge with this one, combine them first before
                // starting on the new range
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    std::vector<Addr> lal_addr;
    std::vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const std::list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    std::string filename =
        name() + ".store" + std::to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    std::string filepath = CheckpointIn::dir() + "/" + filename.c_str();
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite takes an int length, so write in passes of at most
    // INT_MAX bytes each
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    std::vector<Addr> lal_addr;
    std::vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.contains(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

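    // each backing store was written to its own checkpoint section; the
    // store_id recorded in each section indexes backingStore, so the
    // stores must have been created in the same order as when the
    // checkpoint was taken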
    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    std::string filename;
    UNSERIALIZE_SCALAR(filename);
    std::string filepath = cp.getCptDir() + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
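    // decompress the image in chunk_size byte pieces into a scratch
    // buffer and copy it into the already mapped backing store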
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy words that are non-zero, so we do not needlessly
            // dirty host pages that should remain untouched and zero
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}