2 * Copyright (c) 2010-2012 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * Authors: Ron Dreslinski
47 #include "cpu/base.hh"
48 #include "cpu/thread_context.hh"
49 #include "debug/LLSC.hh"
50 #include "debug/MemoryAccess.hh"
51 #include "mem/abstract_mem.hh"
52 #include "mem/packet_access.hh"
53 #include "sim/system.hh"
// Constructor: forwards the SimObject params to MemObject and caches the
// parameter values this object reads often (address range, whether the
// memory is reported in the config table, whether it sits in the global
// address map). pmemAddr starts NULL; the host backing-store pointer is
// supplied later via setBackingStore().
// NOTE(review): the tail of the initializer list and the constructor body
// are not visible in this view -- confirm remaining member initializers
// against the full file.
57 AbstractMemory::AbstractMemory(const Params
*p
) :
58 MemObject(p
), range(params()->range
), pmemAddr(NULL
),
59 confTableReported(p
->conf_table_reported
), inAddrMap(p
->in_addr_map
),
// Init-time sanity check: the memory's size must be an exact multiple of
// the system page size, since backing store is managed at page granularity.
// Panics (fatal simulation error) otherwise.
65 AbstractMemory::init()
69 if (size() % _system
->getPageBytes() != 0)
70 panic("Memory Size not divisible by page size\n");
// Record the host-memory buffer that backs this simulated memory; access()
// and functionalAccess() read/write through this pointer.
// NOTE(review): body not visible in this view -- presumably just assigns
// pmemAddr = pmem_addr; confirm against the full file.
74 AbstractMemory::setBackingStore(uint8_t* pmem_addr
)
// Register this memory's statistics with the Stats framework: per-master
// vectors counting bytes and requests, plus formula stats deriving
// read/write/total bandwidth from those counts. Every vector is sized by
// system()->maxMasters() and given a per-master subname so output is
// broken down by requesting master.
80 AbstractMemory::regStats()
82 using namespace Stats
;
// bytes_read: total bytes read from this memory, per master.
87 .init(system()->maxMasters())
88 .name(name() + ".bytes_read")
89 .desc("Number of bytes read from this memory")
90 .flags(total
| nozero
| nonan
)
92 for (int i
= 0; i
< system()->maxMasters(); i
++) {
93 bytesRead
.subname(i
, system()->getMasterName(i
));
// bytes_inst_read: bytes read by instruction fetches, per master.
96 .init(system()->maxMasters())
97 .name(name() + ".bytes_inst_read")
98 .desc("Number of instructions bytes read from this memory")
99 .flags(total
| nozero
| nonan
)
101 for (int i
= 0; i
< system()->maxMasters(); i
++) {
102 bytesInstRead
.subname(i
, system()->getMasterName(i
));
// bytes_written: total bytes written to this memory, per master.
105 .init(system()->maxMasters())
106 .name(name() + ".bytes_written")
107 .desc("Number of bytes written to this memory")
108 .flags(total
| nozero
| nonan
)
110 for (int i
= 0; i
< system()->maxMasters(); i
++) {
111 bytesWritten
.subname(i
, system()->getMasterName(i
));
// num_reads: count of read requests serviced, per master.
114 .init(system()->maxMasters())
115 .name(name() + ".num_reads")
116 .desc("Number of read requests responded to by this memory")
117 .flags(total
| nozero
| nonan
)
119 for (int i
= 0; i
< system()->maxMasters(); i
++) {
120 numReads
.subname(i
, system()->getMasterName(i
));
// num_writes: count of write requests serviced, per master.
123 .init(system()->maxMasters())
124 .name(name() + ".num_writes")
125 .desc("Number of write requests responded to by this memory")
126 .flags(total
| nozero
| nonan
)
128 for (int i
= 0; i
< system()->maxMasters(); i
++) {
129 numWrites
.subname(i
, system()->getMasterName(i
));
// num_other: requests that are neither plain reads nor writes
// (e.g. the SwapReq path in access()), per master.
132 .init(system()->maxMasters())
133 .name(name() + ".num_other")
134 .desc("Number of other requests responded to by this memory")
135 .flags(total
| nozero
| nonan
)
137 for (int i
= 0; i
< system()->maxMasters(); i
++) {
138 numOther
.subname(i
, system()->getMasterName(i
));
// bw_read: derived read bandwidth (bytes/s), per master.
141 .name(name() + ".bw_read")
142 .desc("Total read bandwidth from this memory (bytes/s)")
145 .flags(total
| nozero
| nonan
)
147 for (int i
= 0; i
< system()->maxMasters(); i
++) {
148 bwRead
.subname(i
, system()->getMasterName(i
));
// bw_inst_read: derived instruction-fetch bandwidth (bytes/s); only
// reported when bytesInstRead is nonzero (prereq).
152 .name(name() + ".bw_inst_read")
153 .desc("Instruction read bandwidth from this memory (bytes/s)")
155 .prereq(bytesInstRead
)
156 .flags(total
| nozero
| nonan
)
158 for (int i
= 0; i
< system()->maxMasters(); i
++) {
159 bwInstRead
.subname(i
, system()->getMasterName(i
));
// bw_write: derived write bandwidth (bytes/s); prereq on bytesWritten.
162 .name(name() + ".bw_write")
163 .desc("Write bandwidth from this memory (bytes/s)")
165 .prereq(bytesWritten
)
166 .flags(total
| nozero
| nonan
)
168 for (int i
= 0; i
< system()->maxMasters(); i
++) {
169 bwWrite
.subname(i
, system()->getMasterName(i
));
// bw_total: derived combined read+write bandwidth (bytes/s).
172 .name(name() + ".bw_total")
173 .desc("Total bandwidth to/from this memory (bytes/s)")
176 .flags(total
| nozero
| nonan
)
178 for (int i
= 0; i
< system()->maxMasters(); i
++) {
179 bwTotal
.subname(i
, system()->getMasterName(i
));
// Formula definitions: bandwidths are byte counters divided by elapsed
// simulated seconds, evaluated lazily at stats-dump time.
181 bwRead
= bytesRead
/ simSeconds
;
182 bwInstRead
= bytesInstRead
/ simSeconds
;
183 bwWrite
= bytesWritten
/ simSeconds
;
184 bwTotal
= (bytesRead
+ bytesWritten
) / simSeconds
;
// Accessor for the address range this memory responds to (the cached
// `range` member). NOTE(review): body not visible in this view.
188 AbstractMemory::getAddrRange() const
193 // Add load-locked to tracking list. Should only be called if the
194 // operation is a load and the LLSC flag is set.
196 AbstractMemory::trackLoadLocked(PacketPtr pkt
)
// Work on the lock-granule-aligned address: LockedAddr::mask() strips the
// low bits so reservations cover a whole granule, not a single byte.
198 Request
*req
= pkt
->req
;
199 Addr paddr
= LockedAddr::mask(req
->getPaddr());
201 // first we check if we already have a locked addr for this
202 // xc. Since each xc only gets one, we just update the
203 // existing record with the new address.
204 list
<LockedAddr
>::iterator i
;
206 for (i
= lockedAddrList
.begin(); i
!= lockedAddrList
.end(); ++i
) {
207 if (i
->matchesContext(req
)) {
208 DPRINTF(LLSC
, "Modifying lock record: context %d addr %#x\n",
209 req
->contextId(), paddr
);
215 // no record for this xc: need to allocate a new one
216 DPRINTF(LLSC
, "Adding lock record: context %d addr %#x\n",
217 req
->contextId(), paddr
);
218 lockedAddrList
.push_front(LockedAddr(req
));
222 // Called on *writes* only... both regular stores and
223 // store-conditional operations. Check for conventional stores which
224 // conflict with locked addresses, and for success/failure of store
227 AbstractMemory::checkLockedAddrList(PacketPtr pkt
)
// Compare against the same masked (granule-aligned) address that
// trackLoadLocked() recorded.
229 Request
*req
= pkt
->req
;
230 Addr paddr
= LockedAddr::mask(req
->getPaddr());
231 bool isLLSC
= pkt
->isLLSC();
233 // Initialize return value. Non-conditional stores always
234 // succeed. Assume conditional stores will fail until proven
236 bool allowStore
= !isLLSC
;
238 // Iterate over list. Note that there could be multiple matching records,
239 // as more than one context could have done a load locked to this location.
240 // Only remove records when we succeed in finding a record for (xc, addr);
241 // then, remove all records with this address. Failed store-conditionals do
242 // not blow unrelated reservations.
243 list
<LockedAddr
>::iterator i
= lockedAddrList
.begin();
246 while (i
!= lockedAddrList
.end()) {
247 if (i
->addr
== paddr
&& i
->matchesContext(req
)) {
248 // it's a store conditional, and as far as the memory system can
249 // tell, the requesting context's lock is still valid.
250 DPRINTF(LLSC
, "StCond success: context %d addr %#x\n",
251 req
->contextId(), paddr
);
255 // If we didn't find a match, keep searching! Someone else may well
256 // have a reservation on this line here but we may find ours in just
// Communicate SC success/failure back to the CPU: extra data 1 means the
// store is allowed to proceed, 0 means the conditional store failed.
260 req
->setExtraData(allowStore
? 1 : 0);
262 // LLSCs that succeeded AND non-LLSC stores both fall into here:
264 // We write address paddr. However, there may be several entries with a
265 // reservation on this address (for other contextIds) and they must all
// Second pass: the write is going through, so invalidate every
// reservation on this granule regardless of which context owns it.
267 i
= lockedAddrList
.begin();
268 while (i
!= lockedAddrList
.end()) {
269 if (i
->addr
== paddr
) {
270 DPRINTF(LLSC
, "Erasing lock record: context %d addr %#x\n",
271 i
->contextId
, paddr
);
272 // For ARM, a spinlock would typically include a Wait
273 // For Event (WFE) to conserve energy. The ARMv8
274 // architecture specifies that an event is
275 // automatically generated when clearing the exclusive
276 // monitor to wake up the processor in WFE.
277 ThreadContext
* ctx
= system()->getThreadContext(i
->contextId
);
278 ctx
->getCpuPtr()->wakeup(ctx
->threadId());
279 i
= lockedAddrList
.erase(i
);
// Packet-tracing helper macros: when MemoryAccess debugging is compiled
// in, TRACE_PACKET prints size/address/cacheability (and dumps data);
// otherwise it expands to nothing. No interior comments added here --
// these are backslash-continued macro fragments.
294 DPRINTF(MemoryAccess,"%s from %s of size %i on address 0x%x data " \
295 "0x%x %c\n", A, system()->getMasterName(pkt->req->masterId()),\
296 pkt->getSize(), pkt->getAddr(), pkt->get<T>(), \
297 pkt->req->isUncacheable() ? 'U' : 'C'); \
301 #define TRACE_PACKET(A) \
303 switch (pkt->getSize()) { \
309 DPRINTF(MemoryAccess, "%s from %s of size %i on address 0x%x %c\n",\
310 A, system()->getMasterName(pkt->req->masterId()), \
311 pkt->getSize(), pkt->getAddr(), \
312 pkt->req->isUncacheable() ? 'U' : 'C'); \
313 DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize()); \
319 #define TRACE_PACKET(A)
// Timing/atomic access path: perform the packet's command against the host
// backing store, update per-master statistics, and (per the visible tail)
// build a response when one is needed. Handles: cache-responding packets
// (ignored), clean evictions (ignored), SwapReq (atomic-op and
// conditional-swap variants), reads (with LLSC tracking), invalidates
// (no-op), and writes.
324 AbstractMemory::access(PacketPtr pkt
)
// If some cache has already claimed responsibility for this request,
// memory must not also respond.
326 if (pkt
->cacheResponding()) {
327 DPRINTF(MemoryAccess
, "Cache responding to %#llx: not responding\n",
// Clean evictions/writebacks carry no dirty data for memory to absorb.
332 if (pkt
->cmd
== MemCmd::CleanEvict
|| pkt
->cmd
== MemCmd::WritebackClean
) {
333 DPRINTF(MemoryAccess
, "CleanEvict on 0x%x: not responding\n",
// The packet's byte span must lie wholly inside this memory's range;
// hostAddr is the corresponding location in the host backing store.
338 assert(AddrRange(pkt
->getAddr(),
339 pkt
->getAddr() + (pkt
->getSize() - 1)).isSubset(range
));
341 uint8_t *hostAddr
= pmemAddr
+ pkt
->getAddr() - range
.start();
// Swap requests: either an attached atomic op is applied in place after
// returning the old value, or a (conditionally guarded) swap is done.
343 if (pkt
->cmd
== MemCmd::SwapReq
) {
344 if (pkt
->isAtomicOp()) {
346 memcpy(pkt
->getPtr
<uint8_t>(), hostAddr
, pkt
->getSize());
347 (*(pkt
->getAtomicOp()))(hostAddr
);
350 std::vector
<uint8_t> overwrite_val(pkt
->getSize());
351 uint64_t condition_val64
;
352 uint32_t condition_val32
;
355 panic("Swap only works if there is real memory (i.e. null=False)");
357 bool overwrite_mem
= true;
358 // keep a copy of our possible write value, and copy what is at the
359 // memory address into the packet
360 std::memcpy(&overwrite_val
[0], pkt
->getConstPtr
<uint8_t>(),
362 std::memcpy(pkt
->getPtr
<uint8_t>(), hostAddr
, pkt
->getSize());
// Conditional swap: only overwrite memory when the current contents
// match the condition value carried in the request's extra data.
// Only 32- and 64-bit condition sizes are supported.
364 if (pkt
->req
->isCondSwap()) {
365 if (pkt
->getSize() == sizeof(uint64_t)) {
366 condition_val64
= pkt
->req
->getExtraData();
367 overwrite_mem
= !std::memcmp(&condition_val64
, hostAddr
,
369 } else if (pkt
->getSize() == sizeof(uint32_t)) {
370 condition_val32
= (uint32_t)pkt
->req
->getExtraData();
371 overwrite_mem
= !std::memcmp(&condition_val32
, hostAddr
,
374 panic("Invalid size for conditional read/write\n");
378 std::memcpy(hostAddr
, &overwrite_val
[0], pkt
->getSize());
// Swaps are counted in the "other" stat bucket, never as fetches.
380 assert(!pkt
->req
->isInstFetch());
381 TRACE_PACKET("Read/Write");
382 numOther
[pkt
->req
->masterId()]++;
// Read path: record an LLSC reservation when applicable (visible call to
// trackLoadLocked), copy data out, and bump read statistics.
384 } else if (pkt
->isRead()) {
385 assert(!pkt
->isWrite());
387 trackLoadLocked(pkt
);
390 memcpy(pkt
->getPtr
<uint8_t>(), hostAddr
, pkt
->getSize());
391 TRACE_PACKET(pkt
->req
->isInstFetch() ? "IFetch" : "Read");
392 numReads
[pkt
->req
->masterId()]++;
393 bytesRead
[pkt
->req
->masterId()] += pkt
->getSize();
394 if (pkt
->req
->isInstFetch())
395 bytesInstRead
[pkt
->req
->masterId()] += pkt
->getSize();
396 } else if (pkt
->isInvalidate()) {
397 // no need to do anything
398 // this clause is intentionally before the write clause: the only
399 // transaction that is both a write and an invalidate is
400 // WriteInvalidate, and for the sake of consistency, it does not
401 // write to memory. in a cacheless system, there are no WriteInv's
402 // because the Write -> WriteInvalidate rewrite happens in the cache.
// Write path: copy data into the backing store and bump write stats.
// NOTE(review): the lines guarding this memcpy (lines not visible here)
// presumably include the checkLockedAddrList()/writeOK gate -- confirm.
403 } else if (pkt
->isWrite()) {
406 memcpy(hostAddr
, pkt
->getConstPtr
<uint8_t>(), pkt
->getSize());
407 DPRINTF(MemoryAccess
, "%s wrote %x bytes to address %x\n",
408 __func__
, pkt
->getSize(), pkt
->getAddr());
410 assert(!pkt
->req
->isInstFetch());
411 TRACE_PACKET("Write");
412 numWrites
[pkt
->req
->masterId()]++;
413 bytesWritten
[pkt
->req
->masterId()] += pkt
->getSize();
// Any other command is unsupported.
416 panic("unimplemented");
419 if (pkt
->needsResponse()) {
// Functional (debug, zero-time) access path: reads/writes the backing
// store directly without statistics or LLSC side effects, and services
// print requests. NOTE(review): this definition continues past the end of
// the visible view; the closing of the final panic/else is not shown.
425 AbstractMemory::functionalAccess(PacketPtr pkt
)
427 assert(AddrRange(pkt
->getAddr(),
428 pkt
->getAddr() + pkt
->getSize() - 1).isSubset(range
));
// Map the packet's simulated address to the host backing-store location.
430 uint8_t *hostAddr
= pmemAddr
+ pkt
->getAddr() - range
.start();
434 memcpy(pkt
->getPtr
<uint8_t>(), hostAddr
, pkt
->getSize());
435 TRACE_PACKET("Read");
437 } else if (pkt
->isWrite()) {
439 memcpy(hostAddr
, pkt
->getConstPtr
<uint8_t>(), pkt
->getSize());
440 TRACE_PACKET("Write");
// Print request: the sender state carries the output stream and prefix.
442 } else if (pkt
->isPrint()) {
443 Packet::PrintReqState
*prs
=
444 dynamic_cast<Packet::PrintReqState
*>(pkt
->senderState
);
446 // Need to call printLabels() explicitly since we're not going
447 // through printObj().
449 // Right now we just print the single byte at the specified address.
450 ccprintf(prs
->os
, "%s%#x\n", prs
->curPrefix(), *hostAddr
);
452 panic("AbstractMemory: unimplemented functional command %s",