2 * Copyright (c) 2010-2012,2017-2019 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include "mem/abstract_mem.hh"
45 #include "arch/locked_mem.hh"
46 #include "cpu/base.hh"
47 #include "cpu/thread_context.hh"
48 #include "debug/LLSC.hh"
49 #include "debug/MemoryAccess.hh"
50 #include "mem/packet_access.hh"
51 #include "sim/system.hh"
// AbstractMemory(p): base class constructor for a memory module.
// The visible initializer list sets the covered address range, a NULL
// backing-store pointer (filled in later by setBackingStore()), a
// memory backdoor over the whole range flagged Readable|Writeable but
// with a null host pointer, and configuration flags copied from the
// Params object (conf_table_reported, in_addr_map, kvm_map).
// NOTE(review): the tail of the initializer list and the constructor
// body are elided in this extract -- confirm against the full source.
55 AbstractMemory::AbstractMemory(const Params
*p
) :
56 ClockedObject(p
), range(params()->range
), pmemAddr(NULL
),
// Backdoor starts with a nullptr host address; the real pointer is
// published by setBackingStore().
57 backdoor(params()->range
, nullptr,
58 (MemBackdoor::Flags
)(MemBackdoor::Readable
|
59 MemBackdoor::Writeable
)),
60 confTableReported(p
->conf_table_reported
), inAddrMap(p
->in_addr_map
),
61 kvmMap(p
->kvm_map
), _system(NULL
),
// init(): post-construction sanity check -- the memory's size must be
// an exact multiple of the system page size, otherwise simulation
// cannot proceed and we panic.
// NOTE(review): function braces are elided in this extract.
67 AbstractMemory::init()
71 if (size() % _system
->getPageBytes() != 0)
72 panic("Memory Size not divisible by page size\n");
// setBackingStore(pmem_addr): point this memory at its host backing
// store. Any previously advertised backdoor is invalidated first so
// holders drop their stale pointer, then the new host pointer is
// published -- unless the range is interleaved, which the backdoor
// cannot represent (nullptr is published instead).
// NOTE(review): the assignment of pmemAddr itself is elided in this
// extract -- confirm against the full source.
76 AbstractMemory::setBackingStore(uint8_t* pmem_addr
)
78 // If there was an existing backdoor, let everybody know it's going away.
80 backdoor
.invalidate();
82 // The back door can't handle interleaved memory.
83 backdoor
.ptr(range
.interleaved() ? nullptr : pmem_addr
);
// MemStats(_mem): statistics group owned by an AbstractMemory.
// Declares byte counters (read / instruction-read / written), request
// counters (reads / writes / other), and derived bandwidth formulas;
// per-master subnames and flags are attached later in regStats().
88 AbstractMemory::MemStats::MemStats(AbstractMemory
&_mem
)
89 : Stats::Group(&_mem
), mem(_mem
),
90 bytesRead(this, "bytes_read",
91 "Number of bytes read from this memory"),
92 bytesInstRead(this, "bytes_inst_read",
93 "Number of instructions bytes read from this memory"),
94 bytesWritten(this, "bytes_written",
95 "Number of bytes written to this memory"),
96 numReads(this, "num_reads",
97 "Number of read requests responded to by this memory"),
98 numWrites(this, "num_writes",
99 "Number of write requests responded to by this memory"),
100 numOther(this, "num_other",
101 "Number of other requests responded to by this memory"),
102 bwRead(this, "bw_read",
103 "Total read bandwidth from this memory (bytes/s)"),
104 bwInstRead(this, "bw_inst_read",
105 "Instruction read bandwidth from this memory (bytes/s)"),
106 bwWrite(this, "bw_write",
107 "Write bandwidth from this memory (bytes/s)"),
108 bwTotal(this, "bw_total",
109 "Total bandwidth to/from this memory (bytes/s)")
// regStats(): finish stat registration once the System is known.
// For each stat, attach flags (total | nozero | nonan) and one
// per-master subname per master ID registered with the System; at the
// end, bind the bandwidth formulas to byte counters / simSeconds.
// NOTE(review): the stat-name lines that precede each .flags() call
// and the loop-closing braces are elided in this extract.
114 AbstractMemory::MemStats::regStats()
116 using namespace Stats
;
118 Stats::Group::regStats();
120 System
*sys
= mem
.system();
// Number of masters determines how many subnames each stat needs.
122 const auto max_masters
= sys
->maxMasters();
126 .flags(total
| nozero
| nonan
)
128 for (int i
= 0; i
< max_masters
; i
++) {
129 bytesRead
.subname(i
, sys
->getMasterName(i
));
134 .flags(total
| nozero
| nonan
)
136 for (int i
= 0; i
< max_masters
; i
++) {
137 bytesInstRead
.subname(i
, sys
->getMasterName(i
));
142 .flags(total
| nozero
| nonan
)
144 for (int i
= 0; i
< max_masters
; i
++) {
145 bytesWritten
.subname(i
, sys
->getMasterName(i
));
150 .flags(total
| nozero
| nonan
)
152 for (int i
= 0; i
< max_masters
; i
++) {
153 numReads
.subname(i
, sys
->getMasterName(i
));
158 .flags(total
| nozero
| nonan
)
160 for (int i
= 0; i
< max_masters
; i
++) {
161 numWrites
.subname(i
, sys
->getMasterName(i
));
166 .flags(total
| nozero
| nonan
)
168 for (int i
= 0; i
< max_masters
; i
++) {
169 numOther
.subname(i
, sys
->getMasterName(i
));
175 .flags(total
| nozero
| nonan
)
177 for (int i
= 0; i
< max_masters
; i
++) {
178 bwRead
.subname(i
, sys
->getMasterName(i
));
// bwInstRead / bwWrite are only reported when their underlying byte
// counter is non-zero (prereq).
183 .prereq(bytesInstRead
)
184 .flags(total
| nozero
| nonan
)
186 for (int i
= 0; i
< max_masters
; i
++) {
187 bwInstRead
.subname(i
, sys
->getMasterName(i
));
192 .prereq(bytesWritten
)
193 .flags(total
| nozero
| nonan
)
195 for (int i
= 0; i
< max_masters
; i
++) {
196 bwWrite
.subname(i
, sys
->getMasterName(i
));
202 .flags(total
| nozero
| nonan
)
204 for (int i
= 0; i
< max_masters
; i
++) {
205 bwTotal
.subname(i
, sys
->getMasterName(i
));
// Derived bandwidths: bytes divided by simulated seconds.
208 bwRead
= bytesRead
/ simSeconds
;
209 bwInstRead
= bytesInstRead
/ simSeconds
;
210 bwWrite
= bytesWritten
/ simSeconds
;
211 bwTotal
= (bytesRead
+ bytesWritten
) / simSeconds
;
// getAddrRange(): accessor for the address range served by this
// memory.
// NOTE(review): the body is entirely elided in this extract --
// presumably returns 'range'; confirm against the full source.
215 AbstractMemory::getAddrRange() const
220 // Add load-locked to tracking list. Should only be called if the
221 // operation is a load and the LLSC flag is set.
// trackLoadLocked(pkt): record an LL/SC reservation for the
// requesting context at the (masked) physical address. If the
// context already owns a record it is updated in place; otherwise a
// fresh LockedAddr record is pushed onto the front of the list.
// NOTE(review): the in-loop update/return path after the "Modifying
// lock record" DPRINTF is elided in this extract.
223 AbstractMemory::trackLoadLocked(PacketPtr pkt
)
225 const RequestPtr
&req
= pkt
->req
;
// Reservations are tracked at cache-block granularity via the mask.
226 Addr paddr
= LockedAddr::mask(req
->getPaddr());
228 // first we check if we already have a locked addr for this
229 // xc. Since each xc only gets one, we just update the
230 // existing record with the new address.
231 list
<LockedAddr
>::iterator i
;
233 for (i
= lockedAddrList
.begin(); i
!= lockedAddrList
.end(); ++i
) {
234 if (i
->matchesContext(req
)) {
235 DPRINTF(LLSC
, "Modifying lock record: context %d addr %#x\n",
236 req
->contextId(), paddr
);
242 // no record for this xc: need to allocate a new one
243 DPRINTF(LLSC
, "Adding lock record: context %d addr %#x\n",
244 req
->contextId(), paddr
);
245 lockedAddrList
.push_front(LockedAddr(req
));
249 // Called on *writes* only... both regular stores and
250 // store-conditional operations. Check for conventional stores which
251 // conflict with locked addresses, and for success/failure of store
// checkLockedAddrList(pkt): decide whether a write may proceed
// against the outstanding LL/SC reservations. Plain stores always
// succeed; a store-conditional succeeds only if the requester still
// holds a matching (context, address) reservation. On any write that
// goes ahead, every reservation on the written address is erased,
// and other contexts that lose a reservation get their exclusive
// monitor cleared via TheISA::globalClearExclusive().
// NOTE(review): the early-exit after "StCond success", the requester
// fallback for requester_cid, and the non-matching erase path are
// elided in this extract.
254 AbstractMemory::checkLockedAddrList(PacketPtr pkt
)
256 const RequestPtr
&req
= pkt
->req
;
257 Addr paddr
= LockedAddr::mask(req
->getPaddr());
258 bool isLLSC
= pkt
->isLLSC();
260 // Initialize return value. Non-conditional stores always
261 // succeed. Assume conditional stores will fail until proven
263 bool allowStore
= !isLLSC
;
265 // Iterate over list. Note that there could be multiple matching records,
266 // as more than one context could have done a load locked to this location.
267 // Only remove records when we succeed in finding a record for (xc, addr);
268 // then, remove all records with this address. Failed store-conditionals do
269 // not blow unrelated reservations.
270 list
<LockedAddr
>::iterator i
= lockedAddrList
.begin();
273 while (i
!= lockedAddrList
.end()) {
274 if (i
->addr
== paddr
&& i
->matchesContext(req
)) {
275 // it's a store conditional, and as far as the memory system can
276 // tell, the requesting context's lock is still valid.
277 DPRINTF(LLSC
, "StCond success: context %d addr %#x\n",
278 req
->contextId(), paddr
);
282 // If we didn't find a match, keep searching! Someone else may well
283 // have a reservation on this line here but we may find ours in just
// Communicate SC success/failure to the CPU via the request's
// extra data (1 = store allowed, 0 = SC failed).
287 req
->setExtraData(allowStore
? 1 : 0);
289 // LLSCs that succeeded AND non-LLSC stores both fall into here:
291 // We write address paddr. However, there may be several entries with a
292 // reservation on this address (for other contextIds) and they must all
294 i
= lockedAddrList
.begin();
295 while (i
!= lockedAddrList
.end()) {
296 if (i
->addr
== paddr
) {
297 DPRINTF(LLSC
, "Erasing lock record: context %d addr %#x\n",
298 i
->contextId
, paddr
);
299 ContextID owner_cid
= i
->contextId
;
300 assert(owner_cid
!= InvalidContextID
);
301 ContextID requester_cid
= req
->hasContextId() ?
// If a *different* context held the reservation, its exclusive
// monitor must be cleared so its next SC fails.
304 if (owner_cid
!= requester_cid
) {
305 ThreadContext
* ctx
= system()->getThreadContext(owner_cid
);
306 TheISA::globalClearExclusive(ctx
);
// erase() returns the next iterator, continuing the sweep.
308 i
= lockedAddrList
.erase(i
);
// tracePacket(sys, label, pkt): MemoryAccess debug helper. For
// power-of-two sizes up to 8 bytes the data value is printed inline
// (guest byte order); otherwise only address/size are printed and the
// payload is hex-dumped via DDUMP. 'U'/'C' marks uncacheable vs
// cacheable requests. Compiled out entirely under NULL_ISA.
320 tracePacket(System
*sys
, const char *label
, PacketPtr pkt
)
322 int size
= pkt
->getSize();
323 #if THE_ISA != NULL_ISA
324 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
325 DPRINTF(MemoryAccess
,"%s from %s of size %i on address %#x data "
326 "%#x %c\n", label
, sys
->getMasterName(pkt
->req
->masterId()),
327 size
, pkt
->getAddr(), pkt
->getUintX(TheISA::GuestByteOrder
),
328 pkt
->req
->isUncacheable() ? 'U' : 'C');
332 DPRINTF(MemoryAccess
, "%s from %s of size %i on address %#x %c\n",
333 label
, sys
->getMasterName(pkt
->req
->masterId()),
334 size
, pkt
->getAddr(), pkt
->req
->isUncacheable() ? 'U' : 'C');
335 DDUMP(MemoryAccess
, pkt
->getConstPtr
<uint8_t>(), pkt
->getSize());
// TRACE_PACKET: expands to a tracePacket() call when tracing is
// enabled, and to nothing otherwise (the surrounding #if/#else
// directives are partially elided in this extract).
338 # define TRACE_PACKET(A) tracePacket(system(), A, pkt)
340 # define TRACE_PACKET(A)
// access(pkt): perform an atomic/timing-mode access against the
// backing store. Early-outs: a cache already responding, or
// CleanEvict/WritebackClean packets, get no response from memory.
// Then dispatch on packet type: SwapReq (atomic ops and plain or
// conditional swaps), reads (with LL/SC tracking when not from a
// cache), invalidate/clean (no-op here), and writes -- each updating
// the corresponding per-master stats. A response is produced at the
// end when the packet needs one.
// NOTE(review): several lines (DPRINTF arguments, memcmp size
// arguments, branch/return closings, makeResponse tail) are elided
// in this extract.
344 AbstractMemory::access(PacketPtr pkt
)
346 if (pkt
->cacheResponding()) {
347 DPRINTF(MemoryAccess
, "Cache responding to %#llx: not responding\n",
352 if (pkt
->cmd
== MemCmd::CleanEvict
|| pkt
->cmd
== MemCmd::WritebackClean
) {
353 DPRINTF(MemoryAccess
, "CleanEvict on 0x%x: not responding\n",
// The packet's whole address range must fall inside this memory.
358 assert(pkt
->getAddrRange().isSubset(range
));
360 uint8_t *host_addr
= toHostAddr(pkt
->getAddr());
362 if (pkt
->cmd
== MemCmd::SwapReq
) {
363 if (pkt
->isAtomicOp()) {
// Atomic op: return the old value, then apply the op in place.
365 pkt
->setData(host_addr
);
366 (*(pkt
->getAtomicOp()))(host_addr
);
369 std::vector
<uint8_t> overwrite_val(pkt
->getSize());
370 uint64_t condition_val64
;
371 uint32_t condition_val32
;
373 panic_if(!pmemAddr
, "Swap only works if there is real memory " \
374 "(i.e. null=False)");
376 bool overwrite_mem
= true;
377 // keep a copy of our possible write value, and copy what is at the
378 // memory address into the packet
379 pkt
->writeData(&overwrite_val
[0]);
380 pkt
->setData(host_addr
);
// Conditional swap: only overwrite when the current memory
// contents equal the condition value carried in extra data
// (64-bit or 32-bit, depending on packet size).
382 if (pkt
->req
->isCondSwap()) {
383 if (pkt
->getSize() == sizeof(uint64_t)) {
384 condition_val64
= pkt
->req
->getExtraData();
385 overwrite_mem
= !std::memcmp(&condition_val64
, host_addr
,
387 } else if (pkt
->getSize() == sizeof(uint32_t)) {
388 condition_val32
= (uint32_t)pkt
->req
->getExtraData();
389 overwrite_mem
= !std::memcmp(&condition_val32
, host_addr
,
392 panic("Invalid size for conditional read/write\n");
396 std::memcpy(host_addr
, &overwrite_val
[0], pkt
->getSize());
398 assert(!pkt
->req
->isInstFetch());
399 TRACE_PACKET("Read/Write");
400 stats
.numOther
[pkt
->req
->masterId()]++;
402 } else if (pkt
->isRead()) {
403 assert(!pkt
->isWrite());
405 assert(!pkt
->fromCache());
406 // if the packet is not coming from a cache then we have
407 // to do the LL/SC tracking here
408 trackLoadLocked(pkt
);
411 pkt
->setData(host_addr
);
413 TRACE_PACKET(pkt
->req
->isInstFetch() ? "IFetch" : "Read");
414 stats
.numReads
[pkt
->req
->masterId()]++;
415 stats
.bytesRead
[pkt
->req
->masterId()] += pkt
->getSize();
416 if (pkt
->req
->isInstFetch())
417 stats
.bytesInstRead
[pkt
->req
->masterId()] += pkt
->getSize();
418 } else if (pkt
->isInvalidate() || pkt
->isClean()) {
419 assert(!pkt
->isWrite());
420 // in a fastmem system invalidating and/or cleaning packets
421 // can be seen due to cache maintenance requests
423 // no need to do anything
424 } else if (pkt
->isWrite()) {
427 pkt
->writeData(host_addr
);
428 DPRINTF(MemoryAccess
, "%s write due to %s\n",
429 __func__
, pkt
->print());
431 assert(!pkt
->req
->isInstFetch());
432 TRACE_PACKET("Write");
433 stats
.numWrites
[pkt
->req
->masterId()]++;
434 stats
.bytesWritten
[pkt
->req
->masterId()] += pkt
->getSize();
// Any other command is a protocol violation at this level.
437 panic("Unexpected packet %s", pkt
->print());
440 if (pkt
->needsResponse()) {
// functionalAccess(pkt): debugger-style access with no timing and no
// stats. Reads copy backing store into the packet, writes copy the
// packet into the backing store, and print requests dump one byte at
// the target address to the PrintReqState's output stream. Any other
// command panics.
// NOTE(review): the isRead() condition heading the first branch, the
// LL/SC checkLockedAddrList handling, and the closing lines are
// elided in this extract.
446 AbstractMemory::functionalAccess(PacketPtr pkt
)
448 assert(pkt
->getAddrRange().isSubset(range
));
450 uint8_t *host_addr
= toHostAddr(pkt
->getAddr());
454 pkt
->setData(host_addr
);
456 TRACE_PACKET("Read");
458 } else if (pkt
->isWrite()) {
460 pkt
->writeData(host_addr
);
462 TRACE_PACKET("Write");
464 } else if (pkt
->isPrint()) {
// Print requests carry their state in senderState; the
// dynamic_cast recovers the PrintReqState for prefix/stream.
465 Packet::PrintReqState
*prs
=
466 dynamic_cast<Packet::PrintReqState
*>(pkt
->senderState
);
468 // Need to call printLabels() explicitly since we're not going
469 // through printObj().
471 // Right now we just print the single byte at the specified address.
472 ccprintf(prs
->os
, "%s%#x\n", prs
->curPrefix(), *host_addr
);
474 panic("AbstractMemory: unimplemented functional command %s",