/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "base/misc.hh"
#include "config/full_system.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "sim/builder.hh"
#include "sim/eventq.hh"
#include "sim/host.hh"

using namespace std;
using namespace TheISA;

PhysicalMemory::PhysicalMemory(Params *p)
    : MemObject(p->name), pmemAddr(NULL), port(NULL), lat(p->latency),
      _params(p)
{
    if (params()->addrRange.size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    int map_flags = MAP_ANON | MAP_PRIVATE;
    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE, map_flags, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap!\n");
    }

    // If requested, initialize all the memory to 0
    if (params()->zero)
        memset(pmemAddr, 0, params()->addrRange.size());

    pagePtr = 0;
}

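// The memory must be connected to something before simulation starts; once
// a port exists, announce our address range to the peer.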
void
PhysicalMemory::init()
{
    if (!port)
        panic("PhysicalMemory not connected to anything!");
    port->sendStatusChange(Port::RangeChange);
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap((char *)pmemAddr, params()->addrRange.size());
    // Remove memPorts?
}

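// Hand out the address of the next unallocated page of physical memory and
// advance the allocation pointer.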
Addr
PhysicalMemory::new_page()
{
    Addr return_addr = pagePtr << LogVMPageSize;
    return_addr += start();

    ++pagePtr;
    return return_addr;
}

int
PhysicalMemory::deviceBlockSize()
{
    // Can accept a request of any size
    return 0;
}

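// Every access takes the same fixed latency, set by the 'latency' parameter.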
Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    return lat;
}


// Add load-locked to tracking list. Should only be called if the
// operation is a load and the LOCKED flag is set.
void
PhysicalMemory::trackLoadLocked(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc. Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: cpu %d thread %d addr %#x\n",
                    req->getCpuNum(), req->getThreadNum(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: cpu %d thread %d addr %#x\n",
            req->getCpuNum(), req->getThreadNum(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}

// Called on *writes* only... both regular stores and
// store-conditional operations. Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLocked = req->isLocked();

    // Initialize return value. Non-conditional stores always
    // succeed. Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLocked;

    // Iterate over list. Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLocked && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: cpu %d thread %d addr %#x\n",
                        req->getCpuNum(), req->getThreadNum(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: cpu %d thread %d addr %#x\n",
                    i->cpuNum, i->threadNum, paddr);
            i = lockedAddrList.erase(i);
        } else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLocked) {
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}

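// Complete the access directly against the backing store: reads copy data
// out of pmemAddr, writes copy data in, and swap (read/write) requests are
// handled as a single atomic step below.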
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    if (pkt->isRead()) {
        if (pkt->req->isLocked()) {
            trackLoadLocked(pkt->req);
        }
        memcpy(pkt->getPtr<uint8_t>(), pmemAddr + pkt->getAddr() - start(),
               pkt->getSize());
#if TRACING_ON
        switch (pkt->getSize()) {
          case sizeof(uint64_t):
            DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
                    pkt->getSize(), pkt->getAddr(), pkt->get<uint64_t>());
            break;
          case sizeof(uint32_t):
            DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
                    pkt->getSize(), pkt->getAddr(), pkt->get<uint32_t>());
            break;
          case sizeof(uint16_t):
            DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
                    pkt->getSize(), pkt->getAddr(), pkt->get<uint16_t>());
            break;
          case sizeof(uint8_t):
            DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
                    pkt->getSize(), pkt->getAddr(), pkt->get<uint8_t>());
            break;
          default:
            DPRINTF(MemoryAccess, "Read of size %i on address 0x%x\n",
                    pkt->getSize(), pkt->getAddr());
        }
#endif
    } else if (pkt->isWrite()) {
        if (writeOK(pkt->req)) {
            memcpy(pmemAddr + pkt->getAddr() - start(), pkt->getPtr<uint8_t>(),
                   pkt->getSize());
#if TRACING_ON
            switch (pkt->getSize()) {
              case sizeof(uint64_t):
                DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
                        pkt->getSize(), pkt->getAddr(), pkt->get<uint64_t>());
                break;
              case sizeof(uint32_t):
                DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
                        pkt->getSize(), pkt->getAddr(), pkt->get<uint32_t>());
                break;
              case sizeof(uint16_t):
                DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
                        pkt->getSize(), pkt->getAddr(), pkt->get<uint16_t>());
                break;
              case sizeof(uint8_t):
                DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
                        pkt->getSize(), pkt->getAddr(), pkt->get<uint8_t>());
                break;
              default:
                DPRINTF(MemoryAccess, "Write of size %i on address 0x%x\n",
                        pkt->getSize(), pkt->getAddr());
            }
#endif
        }
    } else if (pkt->isInvalidate()) {
        // upgrade or invalidate
        pkt->flags |= SATISFIED;
    } else if (pkt->isReadWrite()) {
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), pmemAddr + pkt->getAddr() - start(),
                    pkt->getSize());

        if (pkt->req->isCondSwap()) {
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, pmemAddr +
                        pkt->getAddr() - start(), sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, pmemAddr +
                        pkt->getAddr() - start(), sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(pmemAddr + pkt->getAddr() - start(),
                        &overwrite_val, pkt->getSize());

#if TRACING_ON
        switch (pkt->getSize()) {
          case sizeof(uint64_t):
            DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x old data 0x%x\n",
                    pkt->getSize(), pkt->getAddr(), pkt->get<uint64_t>());
            DPRINTF(MemoryAccess, "New Data 0x%x %s conditional (0x%x) and %s\n",
                    overwrite_mem, pkt->req->isCondSwap() ? "was" : "wasn't",
                    condition_val64, overwrite_mem ? "happened" : "didn't happen");
            break;
          case sizeof(uint32_t):
            DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x old data 0x%x\n",
                    pkt->getSize(), pkt->getAddr(), pkt->get<uint32_t>());
            DPRINTF(MemoryAccess, "New Data 0x%x %s conditional (0x%x) and %s\n",
                    overwrite_mem, pkt->req->isCondSwap() ? "was" : "wasn't",
                    condition_val32, overwrite_mem ? "happened" : "didn't happen");
            break;
          case sizeof(uint16_t):
            DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x old data 0x%x\n",
                    pkt->getSize(), pkt->getAddr(), pkt->get<uint16_t>());
            DPRINTF(MemoryAccess, "New Data 0x%x wasn't conditional and happened\n",
                    overwrite_mem);
            break;
          case sizeof(uint8_t):
            DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x old data 0x%x\n",
                    pkt->getSize(), pkt->getAddr(), pkt->get<uint8_t>());
            DPRINTF(MemoryAccess, "New Data 0x%x wasn't conditional and happened\n",
                    overwrite_mem);
            break;
          default:
            DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x\n",
                    pkt->getSize(), pkt->getAddr());
        }
#endif
    } else {
        panic("unimplemented");
    }

    pkt->result = Packet::Success;
}

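// Only a single 'port' connection is allowed; additional connections must use
// the 'functional' interface, which creates a fresh port on each request.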
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port" && idx == -1) {
        if (port != NULL)
            panic("PhysicalMemory::getPort: additional port requested to memory!");
        port = new MemoryPort(name() + "-port", this);
        return port;
    } else if (if_name == "functional") {
        /* special port for functional writes at startup and for the memtester */
        return new MemoryPort(name() + "-funcport", this);
    } else {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }
}

void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   AddrRangeList &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

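// Report the single configured address range as responsive; physical memory
// never snoops, so the snoop list stays empty.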
void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop)
{
    snoop.clear();
    resp.clear();
    resp.push_back(RangeSize(start(), params()->addrRange.size()));
}

int
PhysicalMemory::MemoryPort::deviceBlockSize()
{
    return memory->deviceBlockSize();
}

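// Atomic accesses complete immediately against the backing store and return
// the fixed access latency.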
Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    memory->doFunctionalAccess(pkt);
    return memory->calculateLatency(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    // Since we override recvFunctional(), we must check the queued timing
    // packets ourselves for data that should satisfy this access.
    std::list<std::pair<Tick, PacketPtr> >::iterator i = transmitList.begin();
    std::list<std::pair<Tick, PacketPtr> >::iterator end = transmitList.end();
    bool notDone = true;

    while (i != end && notDone) {
        PacketPtr target = i->second;
        // If the target contains data, and it overlaps the
        // probed request, need to update data
        if (target->intersect(pkt))
            notDone = fixPacket(pkt, target);
        i++;
    }

    // Default implementation of SimpleTimingPort::recvFunctional()
    // calls recvAtomic() and throws away the latency; we can save a
    // little here by just not calculating the latency.
    memory->doFunctionalAccess(pkt);
}

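// Drain any packets still queued on the port; the object remains in the
// Draining state until they have all been sent.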
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = port->drain(de);
    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

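// Checkpoint the raw contents of memory to a gzip-compressed file next to the
// checkpoint; only the file name is recorded in the checkpoint itself.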
void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->addrRange.size()) !=
        params()->addrRange.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

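// Restore memory from the gzip-compressed checkpoint file: remap the backing
// store, then copy the file contents back in, skipping words that are zero so
// untouched pages are never faulted in.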
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const int chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // open the compressed memory file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap the region that was mmapped in the constructor.
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it.
    munmap((char *)pmemAddr, params()->addrRange.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE,
                               MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long *)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->addrRange.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead != chunkSize &&
            bytesRead != params()->addrRange.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->addrRange.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (int x = 0; x < bytesRead / sizeof(long); x++) {
            if (*(tempPage + x) != 0) {
                pmem_current = (long *)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}


BEGIN_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

    Param<string> file;
    Param<Range<Addr> > range;
    Param<Tick> latency;
    Param<bool> zero;

END_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

BEGIN_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

    INIT_PARAM_DFLT(file, "memory mapped file", ""),
    INIT_PARAM(range, "Device Address Range"),
    INIT_PARAM(latency, "Memory access latency"),
    INIT_PARAM(zero, "Zero initialize memory")

END_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

CREATE_SIM_OBJECT(PhysicalMemory)
{
    PhysicalMemory::Params *p = new PhysicalMemory::Params;
    p->name = getInstanceName();
    p->addrRange = range;
    p->latency = latency;
    p->zero = zero;
    return new PhysicalMemory(p);
}

REGISTER_SIM_OBJECT("PhysicalMemory", PhysicalMemory)