/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <zlib.h>

#include <cstdlib>
#include <cstring>
#include <string>

#include "arch/registers.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/types.hh"
#include "config/full_system.hh"
#include "config/the_isa.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "sim/eventq.hh"

using namespace std;
using namespace TheISA;
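
// PhysicalMemory models a flat physical memory backed by an anonymous
// mmap'd region on the simulation host.  Atomic and functional accesses
// operate directly on that backing store; timing is limited to the fixed
// plus (optionally) randomized latency computed in calculateLatency().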

PhysicalMemory::PhysicalMemory(const Params *p)
    : MemObject(p), pmemAddr(NULL), pagePtr(0),
      lat(p->latency), lat_var(p->latency_var),
      cachedSize(params()->range.size()), cachedStart(params()->range.start)
{
    if (params()->range.size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    // Back the simulated memory with an anonymous, private mapping on the
    // host so untouched pages cost nothing until they are written.
    int map_flags = MAP_ANON | MAP_PRIVATE;
    pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
                               PROT_READ | PROT_WRITE, map_flags, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        fatal("Could not mmap!\n");
    }

    // If requested, initialize all the memory to 0
    if (p->zero)
        memset(pmemAddr, 0, p->range.size());
}
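
// init() runs once all ports are connected: an unconnected memory is a
// configuration error, and every connected peer is notified that this
// memory's address range is now available.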
void
PhysicalMemory::init()
{
    if (ports.size() == 0) {
        fatal("PhysicalMemory object %s is unconnected!", name());
    }

    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        if (*pi)
            (*pi)->sendStatusChange(Port::RangeChange);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap((char*)pmemAddr, params()->range.size());
}

Addr
PhysicalMemory::new_page()
{
    // simple bump-pointer allocation of simulated physical pages
    Addr return_addr = pagePtr << LogVMPageSize;
    return_addr += start();

    ++pagePtr;
    return return_addr;
}

unsigned
PhysicalMemory::deviceBlockSize() const
{
    // Can accept any size request
    return 0;
}

Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    Tick latency = lat;
    if (lat_var != 0)
        latency += random_mt.random<Tick>(0, lat_var);
    return latency;
}
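
// Load-locked / store-conditional support.  Each context holds at most one
// lock record in lockedAddrList: LLSC loads create or update that record,
// and writes consult the list (see checkLockedAddrList below) to decide
// whether a store-conditional succeeds.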

// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LLSC flag is set.
void
PhysicalMemory::trackLoadLocked(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}

// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {
        if (i->addr == paddr) {
            // we have a matching address

            if (isLLSC && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                    i->contextId, paddr);
            i = lockedAddrList.erase(i);
        } else {
            // no match: advance to next record
            ++i;
        }
    }

    // store conditionals report success/failure back through the request's
    // extra data field
    if (isLLSC) {
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}
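
// Tracing helpers: when tracing is compiled in, TRACE_PACKET prints the
// size and address of each access, adding the data value when the access
// is a standard integer width; with tracing compiled out it expands to
// nothing.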
#if TRACING_ON

#define CASE(A, T)                                                        \
  case sizeof(T):                                                         \
    DPRINTF(MemoryAccess, "%s of size %i on address 0x%x data 0x%x\n",    \
            A, pkt->getSize(), pkt->getAddr(), pkt->get<T>());            \
  break

#define TRACE_PACKET(A)                                                   \
    do {                                                                  \
        switch (pkt->getSize()) {                                         \
          CASE(A, uint64_t);                                              \
          CASE(A, uint32_t);                                              \
          CASE(A, uint16_t);                                              \
          CASE(A, uint8_t);                                               \
          default:                                                        \
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n",      \
                    A, pkt->getSize(), pkt->getAddr());                   \
        }                                                                 \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    if (pkt->memInhibitAsserted()) {
        // another device (e.g. a cache) has claimed this request, so the
        // memory must not respond
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return 0;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::SwapReq) {
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        if (!pmemAddr)
            panic("Swap only works if there is real memory (i.e. null=False)");
        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        assert(!pkt->req->isInstFetch());
        TRACE_PACKET("Read/Write");
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC())
            trackLoadLocked(pkt);
        memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
    } else if (pkt->isWrite()) {
        // let the LL/SC bookkeeping decide whether this store (including a
        // store conditional) is allowed to update memory
        if (checkLockedAddrList(pkt))
            memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        assert(!pkt->req->isInstFetch());
        TRACE_PACKET("Write");
    } else if (pkt->isInvalidate()) {
        //upgrade or invalidate
        if (pkt->needsResponse()) {
            pkt->makeAtomicResponse();
        }
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }
    return calculateLatency(pkt);
}
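
// Functional accesses bypass timing entirely: they read or write the
// backing store immediately and complete the packet without computing a
// latency.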
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->isRead()) {
        memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeAtomicResponse();
    } else if (pkt->isWrite()) {
        memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeAtomicResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("PhysicalMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}

Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    // Accept request for "functional" port for backwards compatibility
    // with places where this function is called from C++.  I'd prefer
    // to move all these into Python someday.
    if (if_name == "functional") {
        return new MemoryPort(csprintf("%s-functional", name()), this);
    }

    if (if_name != "port") {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }

    if (idx >= (int)ports.size()) {
        ports.resize(idx + 1);
    }

    if (ports[idx] != NULL) {
        panic("PhysicalMemory::getPort: port %d already assigned", idx);
    }

    MemoryPort *port =
        new MemoryPort(csprintf("%s-port%d", name(), idx), this);

    ports[idx] = port;
    return port;
}
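
// The MemoryPort methods below simply forward port callbacks (status
// changes, address-range queries, atomic and functional accesses) to the
// owning PhysicalMemory object.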

void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name, _memory), memory(_memory)
{
}

void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   bool &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop)
{
    snoop = false;
    resp.clear();
    resp.push_back(RangeSize(start(), params()->range.size()));
}

unsigned
PhysicalMemory::MemoryPort::deviceBlockSize() const
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory->doAtomicAccess(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory->name());

    if (!checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory->doFunctionalAccess(pkt);
    }

    pkt->popLabel();
}
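
// Draining: ask every port to flush its queued responses and report how
// many are still busy so the simulator knows when this memory is quiescent.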
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = 0;
    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        count += (*pi)->drain(de);
    }

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    return count;
}
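
// Checkpointing: the raw memory image is written as a gzip-compressed file
// named "<name>.physmem" in the checkpoint directory, and that file name is
// recorded in the checkpoint via SERIALIZE_SCALAR.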
void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->range.size()) !=
        (int)params()->range.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
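
// Restoring a checkpoint re-creates the anonymous mapping and then copies
// the decompressed image back in chunk by chunk, skipping all-zero words so
// untouched pages stay unbacked on the host.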
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const uint32_t chunkSize = 16384;

    string filename;
    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // open the compressed memory image
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap file that was mmaped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
                               PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
                               -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->range.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);

        assert(bytesRead % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytesRead / sizeof(long); x++) {
            if (*(tempPage + x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
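
// Factory method used by the generated Python parameter class to build the
// SimObject described by these params.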
PhysicalMemory *
PhysicalMemoryParams::create()
{
    return new PhysicalMemory(this);
}