/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cstring>
#include <iostream>
#include <string>

#include "arch/registers.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/types.hh"
#include "config/full_system.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "sim/eventq.hh"

using namespace std;
using namespace TheISA;
PhysicalMemory::PhysicalMemory(const Params *p)
    : MemObject(p), pmemAddr(NULL), pagePtr(0),
      lat(p->latency), lat_var(p->latency_var),
      cachedSize(params()->range.size()), cachedStart(params()->range.start)
{
    if (params()->range.size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");
    int map_flags = MAP_ANON | MAP_PRIVATE;
    pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
                               PROT_READ | PROT_WRITE, map_flags, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        fatal("Could not mmap!\n");
    }

    // If requested, initialize all the memory to 0
    memset(pmemAddr, 0, p->range.size());
}
void
PhysicalMemory::init()
{
    if (ports.size() == 0) {
        fatal("PhysicalMemory object %s is unconnected!", name());
    }

    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        (*pi)->sendStatusChange(Port::RangeChange);
    }
}
PhysicalMemory::~PhysicalMemory()
{
    munmap((char*)pmemAddr, params()->range.size());
}
Addr
PhysicalMemory::new_page()
{
    Addr return_addr = pagePtr << LogVMPageSize;
    return_addr += start();

    ++pagePtr;
    return return_addr;
}
unsigned
PhysicalMemory::deviceBlockSize() const
{
    // Can accept any size request
    return 0;
}

Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    Tick latency = lat;
    latency += random_mt.random<Tick>(0, lat_var);
    return latency;
}
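// Illustration (not part of the original source): with lat = 30 ticks and
// lat_var = 10, calculateLatency() returns 30 plus a uniformly drawn
// variance of up to 10 ticks, modelling simple per-access latency jitter.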
// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LLSC flag is set.
void
PhysicalMemory::trackLoadLocked(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}
// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {
        if (i->addr == paddr) {
            // we have a matching address

            if (isLLSC && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                    i->contextId, paddr);
            i = lockedAddrList.erase(i);
        } else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLLSC) {
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}
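// Sketch of the LL/SC flow through this memory (summary added for clarity;
// it assumes an atomic-mode CPU driving doAtomicAccess() below):
//
//   1. A load with the LLSC flag set reaches the isRead() branch and calls
//      trackLoadLocked(), recording (contextId, masked paddr).
//   2. Every later write to that address walks checkLockedAddrList(); a
//      store from any context erases the matching records.
//   3. A store-conditional succeeds only if its own context's record is
//      still present; the result is reported back via req->setExtraData().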
#if TRACING_ON
#define CASE(A, T)                                                          \
  case sizeof(T):                                                           \
    DPRINTF(MemoryAccess, "%s of size %i on address 0x%x data 0x%x\n",      \
            A, pkt->getSize(), pkt->getAddr(), pkt->get<T>());              \
    break
#define TRACE_PACKET(A)                                                     \
    do {                                                                    \
        switch (pkt->getSize()) {                                           \
          CASE(A, uint64_t); CASE(A, uint32_t);                             \
          CASE(A, uint16_t); CASE(A, uint8_t);                              \
          default:                                                          \
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n",        \
                    A, pkt->getSize(), pkt->getAddr());                     \
        }                                                                   \
    } while (0)
#else
#define TRACE_PACKET(A)
#endif
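// Example (hypothetical values): with MemoryAccess tracing enabled, a 4-byte
// read of 0xdeadbeef at address 0x1000 would log roughly
//   "Read of size 4 on address 0x1000 data 0xdeadbeef"
// while an access of a non-integer size falls through to the form without
// the data field.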
Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    if (pkt->memInhibitAsserted()) {
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return 0;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::SwapReq) {
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        if (!pmemAddr)
            panic("Swap only works if there is real memory (i.e. null=False)");
        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        assert(!pkt->req->isInstFetch());
        TRACE_PACKET("Read/Write");
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC())
            trackLoadLocked(pkt);
        memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
    } else if (pkt->isWrite()) {
        memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        assert(!pkt->req->isInstFetch());
        TRACE_PACKET("Write");
    } else if (pkt->isInvalidate()) {
        // upgrade or invalidate
        if (pkt->needsResponse()) {
            pkt->makeAtomicResponse();
        }
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }
    return calculateLatency(pkt);
}
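// Worked example of the SwapReq path above (illustrative): a 64-bit
// conditional swap carries the expected old value in req->getExtraData().
// The current memory contents are always copied back into the packet, but
// the packet's value is written to memory only when memcmp() reports that
// the expected and current values match (i.e. overwrite_mem stays true).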
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->isRead()) {
        memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeAtomicResponse();
    } else if (pkt->isWrite()) {
        memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeAtomicResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("PhysicalMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    // Accept request for "functional" port for backwards compatibility
    // with places where this function is called from C++.  I'd prefer
    // to move all these into Python someday.
    if (if_name == "functional") {
        return new MemoryPort(csprintf("%s-functional", name()), this);
    }

    if (if_name != "port") {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }

    if (idx >= (int)ports.size()) {
        ports.resize(idx + 1);
    }

    if (ports[idx] != NULL) {
        panic("PhysicalMemory::getPort: port %d already assigned", idx);
    }

    MemoryPort *port =
        new MemoryPort(csprintf("%s-port%d", name(), idx), this);

    ports[idx] = port;
    return port;
}
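// Example: for a memory object named "system.physmem" (name is illustrative),
// requesting idx 0 yields a port called "system.physmem-port0", while the
// legacy "functional" request yields "system.physmem-functional".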
void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}
PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name, _memory), memory(_memory)
{
}
void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}
void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   bool &snoop)
{
    memory->getAddressRanges(resp, snoop);
}
void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop)
{
    snoop = false;
    resp.clear();
    resp.push_back(RangeSize(start(), params()->range.size()));
}
unsigned
PhysicalMemory::MemoryPort::deviceBlockSize() const
{
    return memory->deviceBlockSize();
}
Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory->doAtomicAccess(pkt);
}
void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory->name());

    if (!checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory->doFunctionalAccess(pkt);
    }

    pkt->popLabel();
}
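// Note: checkFunctional() gives packets queued in this SimpleTimingPort a
// chance to satisfy (or update) the request first; only if they do not
// complete it does the access reach memory->doFunctionalAccess().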
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = 0;
    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        count += (*pi)->drain(de);
    }

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    return count;
}
void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->range.size()) !=
        (int)params()->range.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
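// Summary (added for clarity): the checkpoint is just the raw memory image
// written as a gzip stream to "<name>.physmem", with the file name recorded
// through SERIALIZE_SCALAR; unserialize() below restores it chunk by chunk.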
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const uint32_t chunkSize = 16384;

    string filename;
    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // open the compressed memory file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap file that was mmaped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
                               PROT_READ | PROT_WRITE,
                               MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->range.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead != chunkSize &&
            bytesRead != params()->range.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->range.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytesRead / sizeof(long); x++) {
            if (*(tempPage + x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
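// Illustration of the sparse restore above (hypothetical numbers): if a
// 16 KB chunk read from the checkpoint holds a single non-zero long at
// word offset 8, only that one word is stored through pmem_current, so
// pages of the fresh mapping that are entirely zero are never touched.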
PhysicalMemory *
PhysicalMemoryParams::create()
{
    return new PhysicalMemory(this);
}