AddrRange: Transition from Range<T> to AddrRange
[gem5.git] src/mem/abstract_mem.cc
/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 *          Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <cstdio>
#include <climits>
#include <iostream>
#include <string>

#include "arch/registers.hh"
#include "config/the_isa.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "mem/abstract_mem.hh"
#include "mem/packet_access.hh"
#include "sim/system.hh"

using namespace std;

AbstractMemory::AbstractMemory(const Params *p) :
    MemObject(p), range(params()->range), pmemAddr(NULL),
    confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
    _system(NULL)
{
    if (size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    if (params()->null)
        return;

    if (params()->file == "") {
        int map_flags = MAP_ANON | MAP_PRIVATE;
        pmemAddr = (uint8_t *)mmap(NULL, size(),
                                   PROT_READ | PROT_WRITE, map_flags, -1, 0);
    } else {
        int map_flags = MAP_PRIVATE;
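        // Note: MAP_PRIVATE maps the backing file copy-on-write, so writes
        // made by the simulated system are never propagated back to the
        // file on the host.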
        int fd = open(params()->file.c_str(), O_RDONLY);
        long _size = lseek(fd, 0, SEEK_END);
        if (_size != range.size()) {
            fatal("Specified size %d does not match file %s %d\n",
                  range.size(), params()->file, _size);
        }
        lseek(fd, 0, SEEK_SET);
        pmemAddr = (uint8_t *)mmap(NULL, roundUp(_size, sysconf(_SC_PAGESIZE)),
                                   PROT_READ | PROT_WRITE, map_flags, fd, 0);
    }

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        if (params()->file == "")
            fatal("Could not mmap!\n");
        else
            fatal("Could not find file: %s\n", params()->file);
    }

    // If requested, initialize all the memory to 0
    if (p->zero)
        memset(pmemAddr, 0, size());
}


AbstractMemory::~AbstractMemory()
{
    if (pmemAddr)
        munmap((char*)pmemAddr, size());
}

void
AbstractMemory::regStats()
{
    using namespace Stats;

    assert(system());

    bytesRead
        .init(system()->maxMasters())
        .name(name() + ".bytes_read")
        .desc("Number of bytes read from this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesRead.subname(i, system()->getMasterName(i));
    }
    bytesInstRead
        .init(system()->maxMasters())
        .name(name() + ".bytes_inst_read")
        .desc("Number of instruction bytes read from this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesInstRead.subname(i, system()->getMasterName(i));
    }
    bytesWritten
        .init(system()->maxMasters())
        .name(name() + ".bytes_written")
        .desc("Number of bytes written to this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesWritten.subname(i, system()->getMasterName(i));
    }
    numReads
        .init(system()->maxMasters())
        .name(name() + ".num_reads")
        .desc("Number of read requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numReads.subname(i, system()->getMasterName(i));
    }
    numWrites
        .init(system()->maxMasters())
        .name(name() + ".num_writes")
        .desc("Number of write requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numWrites.subname(i, system()->getMasterName(i));
    }
    numOther
        .init(system()->maxMasters())
        .name(name() + ".num_other")
        .desc("Number of other requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numOther.subname(i, system()->getMasterName(i));
    }
    bwRead
        .name(name() + ".bw_read")
        .desc("Total read bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwRead.subname(i, system()->getMasterName(i));
    }

    bwInstRead
        .name(name() + ".bw_inst_read")
        .desc("Instruction read bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesInstRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwInstRead.subname(i, system()->getMasterName(i));
    }
    bwWrite
        .name(name() + ".bw_write")
        .desc("Write bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesWritten)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwWrite.subname(i, system()->getMasterName(i));
    }
    bwTotal
        .name(name() + ".bw_total")
        .desc("Total bandwidth to/from this memory (bytes/s)")
        .precision(0)
        .prereq(bwTotal)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwTotal.subname(i, system()->getMasterName(i));
    }
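
    // bwRead, bwInstRead, bwWrite and bwTotal are formula stats: the
    // expressions assigned below are re-evaluated from the byte counters
    // whenever statistics are dumped.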
    bwRead = bytesRead / simSeconds;
    bwInstRead = bytesInstRead / simSeconds;
    bwWrite = bytesWritten / simSeconds;
    bwTotal = (bytesRead + bytesWritten) / simSeconds;
}

AddrRange
AbstractMemory::getAddrRange() const
{
    return range;
}

// Add load-locked to tracking list. Should only be called if the
// operation is a load and the LLSC flag is set.
void
AbstractMemory::trackLoadLocked(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc. Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations. Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value. Non-conditional stores always
    // succeed. Assume conditional stores will fail until proven
    // otherwise.
    bool allowStore = !isLLSC;

    // Iterate over list. Note that there could be multiple matching records,
    // as more than one context could have done a load locked to this location.
    // Only remove records when we succeed in finding a record for (xc, addr);
    // then, remove all records with this address. Failed store-conditionals do
    // not blow unrelated reservations.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    if (isLLSC) {
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory system can
                // tell, the requesting context's lock is still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                allowStore = true;
                break;
            }
            // If we didn't find a match, keep searching! Someone else may well
            // have a reservation on this line here but we may find ours in just
            // a little while.
            i++;
        }
        req->setExtraData(allowStore ? 1 : 0);
    }
    // LLSCs that succeeded AND non-LLSC stores both fall into here:
    if (allowStore) {
        // We write address paddr. However, there may be several entries with a
        // reservation on this address (for other contextIds) and they must all
        // be removed.
        i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr) {
                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                        i->contextId, paddr);
                i = lockedAddrList.erase(i);
            } else {
                i++;
            }
        }
    }

    return allowStore;
}


#if TRACING_ON
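
// These macros emit one MemoryAccess trace line per access: CASE() matches
// the access size to an integer type so the data value can be printed, and
// TRACE_PACKET() falls back to a raw byte dump for any other size.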

#define CASE(A, T)                                                        \
  case sizeof(T):                                                         \
    DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n",     \
            A, pkt->getSize(), pkt->getAddr(), pkt->get<T>());            \
  break


#define TRACE_PACKET(A)                                                   \
    do {                                                                  \
        switch (pkt->getSize()) {                                         \
          CASE(A, uint64_t);                                              \
          CASE(A, uint32_t);                                              \
          CASE(A, uint16_t);                                              \
          CASE(A, uint8_t);                                               \
          default:                                                        \
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n",      \
                    A, pkt->getSize(), pkt->getAddr());                   \
            DDUMP(MemoryAccess, pkt->getPtr<uint8_t>(), pkt->getSize());  \
        }                                                                 \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

void
AbstractMemory::access(PacketPtr pkt)
{
    assert(pkt->getAddr() >= range.start &&
           (pkt->getAddr() + pkt->getSize() - 1) <= range.end);

    if (pkt->memInhibitAsserted()) {
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start;

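    // SwapReq packets implement atomic swap (and compare-and-swap): the old
    // memory contents are returned in the packet, and the new value is
    // written back, conditionally in the compare-and-swap case.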
    if (pkt->cmd == MemCmd::SwapReq) {
        TheISA::IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        if (!pmemAddr)
            panic("Swap only works if there is real memory (i.e. null=False)");
        assert(sizeof(TheISA::IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        assert(!pkt->req->isInstFetch());
        TRACE_PACKET("Read/Write");
        numOther[pkt->req->masterId()]++;
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            trackLoadLocked(pkt);
        }
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        numReads[pkt->req->masterId()]++;
        bytesRead[pkt->req->masterId()] += pkt->getSize();
        if (pkt->req->isInstFetch())
            bytesInstRead[pkt->req->masterId()] += pkt->getSize();
    } else if (pkt->isWrite()) {
        if (writeOK(pkt)) {
            if (pmemAddr)
                memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            numWrites[pkt->req->masterId()]++;
            bytesWritten[pkt->req->masterId()] += pkt->getSize();
        }
    } else if (pkt->isInvalidate()) {
        // no need to do anything
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeResponse();
    }
}

void
AbstractMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= range.start &&
           (pkt->getAddr() + pkt->getSize() - 1) <= range.end);

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start;

    if (pkt->isRead()) {
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr)
            memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        assert(prs);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("AbstractMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}

void
AbstractMemory::serialize(ostream &os)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    string filename = name() + ".physmem";
    long _size = range.size();

    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(_size);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint64_t pass_size = 0;
    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < size(); written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (size() - written) ?
                    (uint64_t)INT_MAX : (size() - written);

        if (gzwrite(compressedMem, pmemAddr + written,
                    (unsigned int) pass_size) != (int)pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

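    // Also checkpoint any outstanding LL/SC reservations so that they are
    // preserved across a checkpoint and restore.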
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    vector<Addr> lal_addr;
    vector<int> lal_cid;
    while (i != lockedAddrList.end()) {
        lal_addr.push_back(i->addr);
        lal_cid.push_back(i->contextId);
        i++;
    }
    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);
}

void
AbstractMemory::unserialize(Checkpoint *cp, const string &section)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const uint32_t chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // mmap memoryfile
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap file that was mmapped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, size());

    long _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > params()->range.size())
        fatal("Memory size has changed! size %lld, param size %lld\n",
              _size, params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, size(),
                               PROT_READ | PROT_WRITE,
                               MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead == 0)
            break;

        assert(bytesRead % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytesRead / sizeof(long); x++)
        {
            if (*(tempPage+x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage+x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

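    // Restore the LL/SC reservations that serialize() saved.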
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for(int i = 0; i < lal_addr.size(); i++)
        lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
}