2 * Copyright (c) 2010-2012,2017-2019 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "mem/abstract_mem.hh"

#include <cstring>
#include <vector>

#include "arch/locked_mem.hh"
#include "base/loader/memory_image.hh"
#include "base/loader/object_file.hh"
#include "cpu/thread_context.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "mem/packet_access.hh"
#include "sim/system.hh"
54 AbstractMemory::AbstractMemory(const Params
&p
) :
55 ClockedObject(p
), range(p
.range
), pmemAddr(NULL
),
56 backdoor(params().range
, nullptr,
57 (MemBackdoor::Flags
)(MemBackdoor::Readable
|
58 MemBackdoor::Writeable
)),
59 confTableReported(p
.conf_table_reported
), inAddrMap(p
.in_addr_map
),
60 kvmMap(p
.kvm_map
), _system(NULL
),
63 panic_if(!range
.valid() || !range
.size(),
64 "Memory range %s must be valid with non-zero size.",
69 AbstractMemory::initState()
71 ClockedObject::initState();
73 const auto &file
= params().image_file
;
77 auto *object
= Loader::createObjectFile(file
, true);
78 fatal_if(!object
, "%s: Could not load %s.", name(), file
);
80 Loader::debugSymbolTable
.insert(*object
->symtab().globals());
81 Loader::MemoryImage image
= object
->buildImage();
83 AddrRange
image_range(image
.minAddr(), image
.maxAddr());
84 if (!range
.contains(image_range
.start())) {
85 warn("%s: Moving image from %s to memory address range %s.",
86 name(), image_range
.to_string(), range
.to_string());
87 image
= image
.offset(range
.start());
88 image_range
= AddrRange(image
.minAddr(), image
.maxAddr());
90 panic_if(!image_range
.isSubset(range
), "%s: memory image %s doesn't fit.",
93 PortProxy
proxy([this](PacketPtr pkt
) { functionalAccess(pkt
); },
94 system()->cacheLineSize());
96 panic_if(!image
.write(proxy
), "%s: Unable to write image.");
100 AbstractMemory::setBackingStore(uint8_t* pmem_addr
)
102 // If there was an existing backdoor, let everybody know it's going away.
104 backdoor
.invalidate();
106 // The back door can't handle interleaved memory.
107 backdoor
.ptr(range
.interleaved() ? nullptr : pmem_addr
);
109 pmemAddr
= pmem_addr
;
112 AbstractMemory::MemStats::MemStats(AbstractMemory
&_mem
)
113 : Stats::Group(&_mem
), mem(_mem
),
114 ADD_STAT(bytesRead
, UNIT_BYTE
, "Number of bytes read from this memory"),
115 ADD_STAT(bytesInstRead
, UNIT_BYTE
,
116 "Number of instructions bytes read from this memory"),
117 ADD_STAT(bytesWritten
, UNIT_BYTE
,
118 "Number of bytes written to this memory"),
119 ADD_STAT(numReads
, UNIT_COUNT
,
120 "Number of read requests responded to by this memory"),
121 ADD_STAT(numWrites
, UNIT_COUNT
,
122 "Number of write requests responded to by this memory"),
123 ADD_STAT(numOther
, UNIT_COUNT
,
124 "Number of other requests responded to by this memory"),
125 ADD_STAT(bwRead
, UNIT_RATE(Stats::Units::Byte
, Stats::Units::Second
),
126 "Total read bandwidth from this memory"),
127 ADD_STAT(bwInstRead
, UNIT_RATE(Stats::Units::Byte
, Stats::Units::Second
),
128 "Instruction read bandwidth from this memory"),
129 ADD_STAT(bwWrite
, UNIT_RATE(Stats::Units::Byte
, Stats::Units::Second
),
130 "Write bandwidth from this memory"),
131 ADD_STAT(bwTotal
, UNIT_RATE(Stats::Units::Byte
, Stats::Units::Second
),
132 "Total bandwidth to/from this memory")
137 AbstractMemory::MemStats::regStats()
139 using namespace Stats
;
141 Stats::Group::regStats();
143 System
*sys
= mem
.system();
145 const auto max_requestors
= sys
->maxRequestors();
148 .init(max_requestors
)
149 .flags(total
| nozero
| nonan
)
151 for (int i
= 0; i
< max_requestors
; i
++) {
152 bytesRead
.subname(i
, sys
->getRequestorName(i
));
156 .init(max_requestors
)
157 .flags(total
| nozero
| nonan
)
159 for (int i
= 0; i
< max_requestors
; i
++) {
160 bytesInstRead
.subname(i
, sys
->getRequestorName(i
));
164 .init(max_requestors
)
165 .flags(total
| nozero
| nonan
)
167 for (int i
= 0; i
< max_requestors
; i
++) {
168 bytesWritten
.subname(i
, sys
->getRequestorName(i
));
172 .init(max_requestors
)
173 .flags(total
| nozero
| nonan
)
175 for (int i
= 0; i
< max_requestors
; i
++) {
176 numReads
.subname(i
, sys
->getRequestorName(i
));
180 .init(max_requestors
)
181 .flags(total
| nozero
| nonan
)
183 for (int i
= 0; i
< max_requestors
; i
++) {
184 numWrites
.subname(i
, sys
->getRequestorName(i
));
188 .init(max_requestors
)
189 .flags(total
| nozero
| nonan
)
191 for (int i
= 0; i
< max_requestors
; i
++) {
192 numOther
.subname(i
, sys
->getRequestorName(i
));
198 .flags(total
| nozero
| nonan
)
200 for (int i
= 0; i
< max_requestors
; i
++) {
201 bwRead
.subname(i
, sys
->getRequestorName(i
));
206 .prereq(bytesInstRead
)
207 .flags(total
| nozero
| nonan
)
209 for (int i
= 0; i
< max_requestors
; i
++) {
210 bwInstRead
.subname(i
, sys
->getRequestorName(i
));
215 .prereq(bytesWritten
)
216 .flags(total
| nozero
| nonan
)
218 for (int i
= 0; i
< max_requestors
; i
++) {
219 bwWrite
.subname(i
, sys
->getRequestorName(i
));
225 .flags(total
| nozero
| nonan
)
227 for (int i
= 0; i
< max_requestors
; i
++) {
228 bwTotal
.subname(i
, sys
->getRequestorName(i
));
231 bwRead
= bytesRead
/ simSeconds
;
232 bwInstRead
= bytesInstRead
/ simSeconds
;
233 bwWrite
= bytesWritten
/ simSeconds
;
234 bwTotal
= (bytesRead
+ bytesWritten
) / simSeconds
;
238 AbstractMemory::getAddrRange() const
243 // Add load-locked to tracking list. Should only be called if the
244 // operation is a load and the LLSC flag is set.
246 AbstractMemory::trackLoadLocked(PacketPtr pkt
)
248 const RequestPtr
&req
= pkt
->req
;
249 Addr paddr
= LockedAddr::mask(req
->getPaddr());
251 // first we check if we already have a locked addr for this
252 // xc. Since each xc only gets one, we just update the
253 // existing record with the new address.
254 std::list
<LockedAddr
>::iterator i
;
256 for (i
= lockedAddrList
.begin(); i
!= lockedAddrList
.end(); ++i
) {
257 if (i
->matchesContext(req
)) {
258 DPRINTF(LLSC
, "Modifying lock record: context %d addr %#x\n",
259 req
->contextId(), paddr
);
265 // no record for this xc: need to allocate a new one
266 DPRINTF(LLSC
, "Adding lock record: context %d addr %#x\n",
267 req
->contextId(), paddr
);
268 lockedAddrList
.push_front(LockedAddr(req
));
269 backdoor
.invalidate();
273 // Called on *writes* only... both regular stores and
274 // store-conditional operations. Check for conventional stores which
275 // conflict with locked addresses, and for success/failure of store
278 AbstractMemory::checkLockedAddrList(PacketPtr pkt
)
280 const RequestPtr
&req
= pkt
->req
;
281 Addr paddr
= LockedAddr::mask(req
->getPaddr());
282 bool isLLSC
= pkt
->isLLSC();
284 // Initialize return value. Non-conditional stores always
285 // succeed. Assume conditional stores will fail until proven
287 bool allowStore
= !isLLSC
;
289 // Iterate over list. Note that there could be multiple matching records,
290 // as more than one context could have done a load locked to this location.
291 // Only remove records when we succeed in finding a record for (xc, addr);
292 // then, remove all records with this address. Failed store-conditionals do
293 // not blow unrelated reservations.
294 std::list
<LockedAddr
>::iterator i
= lockedAddrList
.begin();
297 while (i
!= lockedAddrList
.end()) {
298 if (i
->addr
== paddr
&& i
->matchesContext(req
)) {
299 // it's a store conditional, and as far as the memory system can
300 // tell, the requesting context's lock is still valid.
301 DPRINTF(LLSC
, "StCond success: context %d addr %#x\n",
302 req
->contextId(), paddr
);
306 // If we didn't find a match, keep searching! Someone else may well
307 // have a reservation on this line here but we may find ours in just
311 req
->setExtraData(allowStore
? 1 : 0);
313 // LLSCs that succeeded AND non-LLSC stores both fall into here:
315 // We write address paddr. However, there may be several entries with a
316 // reservation on this address (for other contextIds) and they must all
318 i
= lockedAddrList
.begin();
319 while (i
!= lockedAddrList
.end()) {
320 if (i
->addr
== paddr
) {
321 DPRINTF(LLSC
, "Erasing lock record: context %d addr %#x\n",
322 i
->contextId
, paddr
);
323 ContextID owner_cid
= i
->contextId
;
324 assert(owner_cid
!= InvalidContextID
);
325 ContextID requestor_cid
= req
->hasContextId() ?
328 if (owner_cid
!= requestor_cid
) {
329 ThreadContext
* ctx
= system()->threads
[owner_cid
];
330 TheISA::globalClearExclusive(ctx
);
332 i
= lockedAddrList
.erase(i
);
#if TRACING_ON
/**
 * Emit a MemoryAccess debug line for a packet.  Power-of-two sizes up
 * to 8 bytes also print the data value; larger/odd sizes dump the raw
 * bytes instead.  'U'/'C' marks uncacheable vs cacheable requests.
 */
static inline void
tracePacket(System *sys, const char *label, PacketPtr pkt)
{
    int size = pkt->getSize();
    if (size == 1 || size == 2 || size == 4 || size == 8) {
        ByteOrder byte_order = sys->getGuestByteOrder();
        // Fix: the argument list previously repeated "size, pkt->getAddr()",
        // leaving two extra varargs beyond the format specifiers.
        DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x data "
                "%#x %c\n", label, sys->getRequestorName(pkt->req->
                requestorId()), size, pkt->getAddr(), pkt->getUintX(byte_order),
                pkt->req->isUncacheable() ? 'U' : 'C');
        return;
    }
    DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
            label, sys->getRequestorName(pkt->req->requestorId()),
            size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
    DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}

// Fix: the two TRACE_PACKET definitions contradicted each other without
// the TRACING_ON preprocessor guard restored here.
#   define TRACE_PACKET(A) tracePacket(system(), A, pkt)
#else
#   define TRACE_PACKET(A)
#endif
368 AbstractMemory::access(PacketPtr pkt
)
370 if (pkt
->cacheResponding()) {
371 DPRINTF(MemoryAccess
, "Cache responding to %#llx: not responding\n",
376 if (pkt
->cmd
== MemCmd::CleanEvict
|| pkt
->cmd
== MemCmd::WritebackClean
) {
377 DPRINTF(MemoryAccess
, "CleanEvict on 0x%x: not responding\n",
382 assert(pkt
->getAddrRange().isSubset(range
));
384 uint8_t *host_addr
= toHostAddr(pkt
->getAddr());
386 if (pkt
->cmd
== MemCmd::SwapReq
) {
387 if (pkt
->isAtomicOp()) {
389 pkt
->setData(host_addr
);
390 (*(pkt
->getAtomicOp()))(host_addr
);
393 std::vector
<uint8_t> overwrite_val(pkt
->getSize());
394 uint64_t condition_val64
;
395 uint32_t condition_val32
;
397 panic_if(!pmemAddr
, "Swap only works if there is real memory " \
398 "(i.e. null=False)");
400 bool overwrite_mem
= true;
401 // keep a copy of our possible write value, and copy what is at the
402 // memory address into the packet
403 pkt
->writeData(&overwrite_val
[0]);
404 pkt
->setData(host_addr
);
406 if (pkt
->req
->isCondSwap()) {
407 if (pkt
->getSize() == sizeof(uint64_t)) {
408 condition_val64
= pkt
->req
->getExtraData();
409 overwrite_mem
= !std::memcmp(&condition_val64
, host_addr
,
411 } else if (pkt
->getSize() == sizeof(uint32_t)) {
412 condition_val32
= (uint32_t)pkt
->req
->getExtraData();
413 overwrite_mem
= !std::memcmp(&condition_val32
, host_addr
,
416 panic("Invalid size for conditional read/write\n");
420 std::memcpy(host_addr
, &overwrite_val
[0], pkt
->getSize());
422 assert(!pkt
->req
->isInstFetch());
423 TRACE_PACKET("Read/Write");
424 stats
.numOther
[pkt
->req
->requestorId()]++;
426 } else if (pkt
->isRead()) {
427 assert(!pkt
->isWrite());
429 assert(!pkt
->fromCache());
430 // if the packet is not coming from a cache then we have
431 // to do the LL/SC tracking here
432 trackLoadLocked(pkt
);
435 pkt
->setData(host_addr
);
437 TRACE_PACKET(pkt
->req
->isInstFetch() ? "IFetch" : "Read");
438 stats
.numReads
[pkt
->req
->requestorId()]++;
439 stats
.bytesRead
[pkt
->req
->requestorId()] += pkt
->getSize();
440 if (pkt
->req
->isInstFetch())
441 stats
.bytesInstRead
[pkt
->req
->requestorId()] += pkt
->getSize();
442 } else if (pkt
->isInvalidate() || pkt
->isClean()) {
443 assert(!pkt
->isWrite());
444 // in a fastmem system invalidating and/or cleaning packets
445 // can be seen due to cache maintenance requests
447 // no need to do anything
448 } else if (pkt
->isWrite()) {
451 pkt
->writeData(host_addr
);
452 DPRINTF(MemoryAccess
, "%s write due to %s\n",
453 __func__
, pkt
->print());
455 assert(!pkt
->req
->isInstFetch());
456 TRACE_PACKET("Write");
457 stats
.numWrites
[pkt
->req
->requestorId()]++;
458 stats
.bytesWritten
[pkt
->req
->requestorId()] += pkt
->getSize();
461 panic("Unexpected packet %s", pkt
->print());
464 if (pkt
->needsResponse()) {
470 AbstractMemory::functionalAccess(PacketPtr pkt
)
472 assert(pkt
->getAddrRange().isSubset(range
));
474 uint8_t *host_addr
= toHostAddr(pkt
->getAddr());
478 pkt
->setData(host_addr
);
480 TRACE_PACKET("Read");
482 } else if (pkt
->isWrite()) {
484 pkt
->writeData(host_addr
);
486 TRACE_PACKET("Write");
488 } else if (pkt
->isPrint()) {
489 Packet::PrintReqState
*prs
=
490 dynamic_cast<Packet::PrintReqState
*>(pkt
->senderState
);
492 // Need to call printLabels() explicitly since we're not going
493 // through printObj().
495 // Right now we just print the single byte at the specified address.
496 ccprintf(prs
->os
, "%s%#x\n", prs
->curPrefix(), *host_addr
);
498 panic("AbstractMemory: unimplemented functional command %s",