/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 */
#include <sys/types.h>
#include <sys/mman.h>

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "base/misc.hh"
#include "config/full_system.hh"
#include "mem/physical.hh"
#include "sim/builder.hh"
#include "sim/eventq.hh"
#include "sim/host.hh"

using namespace std;
using namespace TheISA;
53 PhysicalMemory::PhysicalMemory(Params
*p
)
54 : MemObject(p
->name
), pmemAddr(NULL
), port(NULL
), lat(p
->latency
), _params(p
)
56 if (params()->addrRange
.size() % TheISA::PageBytes
!= 0)
57 panic("Memory Size not divisible by page size\n");
59 int map_flags
= MAP_ANON
| MAP_PRIVATE
;
60 pmemAddr
= (uint8_t *)mmap(NULL
, params()->addrRange
.size(), PROT_READ
| PROT_WRITE
,
63 if (pmemAddr
== (void *)MAP_FAILED
) {
65 fatal("Could not mmap!\n");
68 //If requested, initialize all the memory to 0
70 memset(pmemAddr
, 0, params()->addrRange
.size());
76 PhysicalMemory::init()
79 panic("PhysicalMemory not connected to anything!");
80 port
->sendStatusChange(Port::RangeChange
);
83 PhysicalMemory::~PhysicalMemory()
86 munmap(pmemAddr
, params()->addrRange
.size());
91 PhysicalMemory::new_page()
93 Addr return_addr
= pagePtr
<< LogVMPageSize
;
94 return_addr
+= params()->addrRange
.start
;
101 PhysicalMemory::deviceBlockSize()
103 //Can accept anysize request
108 PhysicalMemory::calculateLatency(PacketPtr pkt
)
115 // Add load-locked to tracking list. Should only be called if the
116 // operation is a load and the LOCKED flag is set.
118 PhysicalMemory::trackLoadLocked(Request
*req
)
120 Addr paddr
= LockedAddr::mask(req
->getPaddr());
122 // first we check if we already have a locked addr for this
123 // xc. Since each xc only gets one, we just update the
124 // existing record with the new address.
125 list
<LockedAddr
>::iterator i
;
127 for (i
= lockedAddrList
.begin(); i
!= lockedAddrList
.end(); ++i
) {
128 if (i
->matchesContext(req
)) {
129 DPRINTF(LLSC
, "Modifying lock record: cpu %d thread %d addr %#x\n",
130 req
->getCpuNum(), req
->getThreadNum(), paddr
);
136 // no record for this xc: need to allocate a new one
137 DPRINTF(LLSC
, "Adding lock record: cpu %d thread %d addr %#x\n",
138 req
->getCpuNum(), req
->getThreadNum(), paddr
);
139 lockedAddrList
.push_front(LockedAddr(req
));
143 // Called on *writes* only... both regular stores and
144 // store-conditional operations. Check for conventional stores which
145 // conflict with locked addresses, and for success/failure of store
148 PhysicalMemory::checkLockedAddrList(Request
*req
)
150 Addr paddr
= LockedAddr::mask(req
->getPaddr());
151 bool isLocked
= req
->isLocked();
153 // Initialize return value. Non-conditional stores always
154 // succeed. Assume conditional stores will fail until proven
156 bool success
= !isLocked
;
158 // Iterate over list. Note that there could be multiple matching
159 // records, as more than one context could have done a load locked
161 list
<LockedAddr
>::iterator i
= lockedAddrList
.begin();
163 while (i
!= lockedAddrList
.end()) {
165 if (i
->addr
== paddr
) {
166 // we have a matching address
168 if (isLocked
&& i
->matchesContext(req
)) {
169 // it's a store conditional, and as far as the memory
170 // system can tell, the requesting context's lock is
172 DPRINTF(LLSC
, "StCond success: cpu %d thread %d addr %#x\n",
173 req
->getCpuNum(), req
->getThreadNum(), paddr
);
177 // Get rid of our record of this lock and advance to next
178 DPRINTF(LLSC
, "Erasing lock record: cpu %d thread %d addr %#x\n",
179 i
->cpuNum
, i
->threadNum
, paddr
);
180 i
= lockedAddrList
.erase(i
);
183 // no match: advance to next record
189 req
->setScResult(success
? 1 : 0);
196 PhysicalMemory::doFunctionalAccess(PacketPtr pkt
)
198 assert(pkt
->getAddr() >= params()->addrRange
.start
&&
199 pkt
->getAddr() + pkt
->getSize() <= params()->addrRange
.start
+
200 params()->addrRange
.size());
203 if (pkt
->req
->isLocked()) {
204 trackLoadLocked(pkt
->req
);
206 DPRINTF(MemoryAccess
, "Performing Read of size %i on address 0x%x\n",
207 pkt
->getSize(), pkt
->getAddr());
208 memcpy(pkt
->getPtr
<uint8_t>(),
209 pmemAddr
+ pkt
->getAddr() - params()->addrRange
.start
,
212 else if (pkt
->isWrite()) {
213 if (writeOK(pkt
->req
)) {
214 DPRINTF(MemoryAccess
, "Performing Write of size %i on address 0x%x\n",
215 pkt
->getSize(), pkt
->getAddr());
216 memcpy(pmemAddr
+ pkt
->getAddr() - params()->addrRange
.start
,
217 pkt
->getPtr
<uint8_t>(), pkt
->getSize());
220 else if (pkt
->isInvalidate()) {
221 //upgrade or invalidate
222 pkt
->flags
|= SATISFIED
;
225 panic("unimplemented");
228 pkt
->result
= Packet::Success
;
232 PhysicalMemory::getPort(const std::string
&if_name
, int idx
)
234 if (if_name
== "port" && idx
== -1) {
236 panic("PhysicalMemory::getPort: additional port requested to memory!");
237 port
= new MemoryPort(name() + "-port", this);
239 } else if (if_name
== "functional") {
240 /* special port for functional writes at startup. And for memtester */
241 return new MemoryPort(name() + "-funcport", this);
243 panic("PhysicalMemory::getPort: unknown port %s requested", if_name
);
248 PhysicalMemory::recvStatusChange(Port::Status status
)
252 PhysicalMemory::MemoryPort::MemoryPort(const std::string
&_name
,
253 PhysicalMemory
*_memory
)
254 : SimpleTimingPort(_name
), memory(_memory
)
258 PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status
)
260 memory
->recvStatusChange(status
);
264 PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList
&resp
,
265 AddrRangeList
&snoop
)
267 memory
->getAddressRanges(resp
, snoop
);
271 PhysicalMemory::getAddressRanges(AddrRangeList
&resp
, AddrRangeList
&snoop
)
275 resp
.push_back(RangeSize(params()->addrRange
.start
,
276 params()->addrRange
.size()));
280 PhysicalMemory::MemoryPort::deviceBlockSize()
282 return memory
->deviceBlockSize();
286 PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt
)
288 memory
->doFunctionalAccess(pkt
);
289 return memory
->calculateLatency(pkt
);
293 PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt
)
295 //Since we are overriding the function, make sure to have the impl of the
296 //check or functional accesses here.
297 std::list
<std::pair
<Tick
,PacketPtr
> >::iterator i
= transmitList
.begin();
298 std::list
<std::pair
<Tick
,PacketPtr
> >::iterator end
= transmitList
.end();
301 while (i
!= end
&& notDone
) {
302 PacketPtr target
= i
->second
;
303 // If the target contains data, and it overlaps the
304 // probed request, need to update data
305 if (target
->intersect(pkt
))
306 notDone
= fixPacket(pkt
, target
);
310 // Default implementation of SimpleTimingPort::recvFunctional()
311 // calls recvAtomic() and throws away the latency; we can save a
312 // little here by just not calculating the latency.
313 memory
->doFunctionalAccess(pkt
);
317 PhysicalMemory::drain(Event
*de
)
319 int count
= port
->drain(de
);
321 changeState(Draining
);
323 changeState(Drained
);
// Checkpoint the memory image: record the filename in the checkpoint,
// then gzip-compress the entire backing store into that file.
void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    // Hand the fd to zlib; gzclose() below also closes the fd.
    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr,
                params()->addrRange.size()) != params()->addrRange.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
// Restore the memory image from a gzip-compressed checkpoint file.
// The constructor's mapping is released and re-created here so that
// gzip/open cannot fragment the large region, then the file is read in
// fixed-size chunks, copying only non-zero words into the fresh
// (zero-filled) anonymous mapping.
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    // NOTE(review): these local declarations were dropped by the
    // corrupted listing and have been restored — confirm types.
    long *tempPage;
    long *pmem_current;
    uint64_t curSize = 0;
    uint32_t bytesRead;
    const int chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // open the checkpointed memory file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap file that was mmaped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap(pmemAddr, params()->addrRange.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE,
                               MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->addrRange.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        // A short read is only legal on the final chunk.
        if (bytesRead != chunkSize &&
            bytesRead != params()->addrRange.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->addrRange.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        // Sparse restore: MAP_ANON pages are already zero, so only
        // non-zero words need to be written.
        for (int x = 0; x < bytesRead / sizeof(long); x++) {
            if (*(tempPage + x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
434 BEGIN_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory
)
437 Param
<Range
<Addr
> > range
;
441 END_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory
)
443 BEGIN_INIT_SIM_OBJECT_PARAMS(PhysicalMemory
)
445 INIT_PARAM_DFLT(file
, "memory mapped file", ""),
446 INIT_PARAM(range
, "Device Address Range"),
447 INIT_PARAM(latency
, "Memory access latency"),
448 INIT_PARAM(zero
, "Zero initialize memory")
450 END_INIT_SIM_OBJECT_PARAMS(PhysicalMemory
)
452 CREATE_SIM_OBJECT(PhysicalMemory
)
454 PhysicalMemory::Params
*p
= new PhysicalMemory::Params
;
455 p
->name
= getInstanceName();
456 p
->addrRange
= range
;
457 p
->latency
= latency
;
459 return new PhysicalMemory(p
);
462 REGISTER_SIM_OBJECT("PhysicalMemory", PhysicalMemory
)