2 * Copyright (c) 2010-2012,2017-2019 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * Authors: Ron Dreslinski
45 #include "mem/abstract_mem.hh"
49 #include "arch/locked_mem.hh"
50 #include "cpu/base.hh"
51 #include "cpu/thread_context.hh"
52 #include "debug/LLSC.hh"
53 #include "debug/MemoryAccess.hh"
54 #include "mem/packet_access.hh"
55 #include "sim/system.hh"
// Construct the memory. The actual backing store pointer (pmemAddr) is
// not known yet; it is installed later via setBackingStore(). Likewise
// _system is filled in by system registration, hence both start as NULL.
AbstractMemory::AbstractMemory(const Params *p) :
    ClockedObject(p), range(params()->range), pmemAddr(NULL),
    // Pre-build a (not yet valid) backdoor covering our whole range;
    // it only becomes usable once a host pointer is set.
    backdoor(params()->range, nullptr,
             (MemBackdoor::Flags)(MemBackdoor::Readable |
                                  MemBackdoor::Writeable)),
    confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
    kvmMap(p->kvm_map), _system(NULL)
{
}
void
AbstractMemory::init()
{
    // The memory must span a whole number of system pages; otherwise a
    // page-granular mapping of the backing store would run off the end.
    if (size() % _system->getPageBytes() != 0)
        panic("Memory Size not divisible by page size\n");
}
// Install (or replace) the host backing store for this memory.
// @param pmem_addr host pointer backing the simulated range; may be
//        null for a "null" memory with no real storage.
void
AbstractMemory::setBackingStore(uint8_t* pmem_addr)
{
    // If there was an existing backdoor, let everybody know it's going away.
    backdoor.invalidate();

    // The back door can't handle interleaved memory.
    backdoor.ptr(range.interleaved() ? nullptr : pmem_addr);

    pmemAddr = pmem_addr;
}
// Register per-master statistics: byte/request counters plus derived
// bandwidth formulas. Each vector stat gets one entry per master, with
// the master's name as the subname.
void
AbstractMemory::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    bytesRead
        .init(system()->maxMasters())
        .name(name() + ".bytes_read")
        .desc("Number of bytes read from this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesRead.subname(i, system()->getMasterName(i));
    }
    bytesInstRead
        .init(system()->maxMasters())
        .name(name() + ".bytes_inst_read")
        .desc("Number of instructions bytes read from this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesInstRead.subname(i, system()->getMasterName(i));
    }
    bytesWritten
        .init(system()->maxMasters())
        .name(name() + ".bytes_written")
        .desc("Number of bytes written to this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesWritten.subname(i, system()->getMasterName(i));
    }
    numReads
        .init(system()->maxMasters())
        .name(name() + ".num_reads")
        .desc("Number of read requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numReads.subname(i, system()->getMasterName(i));
    }
    numWrites
        .init(system()->maxMasters())
        .name(name() + ".num_writes")
        .desc("Number of write requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numWrites.subname(i, system()->getMasterName(i));
    }
    numOther
        .init(system()->maxMasters())
        .name(name() + ".num_other")
        .desc("Number of other requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numOther.subname(i, system()->getMasterName(i));
    }
    bwRead
        .name(name() + ".bw_read")
        .desc("Total read bandwidth from this memory (bytes/s)")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwRead.subname(i, system()->getMasterName(i));
    }
    bwInstRead
        .name(name() + ".bw_inst_read")
        .desc("Instruction read bandwidth from this memory (bytes/s)")
        .prereq(bytesInstRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwInstRead.subname(i, system()->getMasterName(i));
    }
    bwWrite
        .name(name() + ".bw_write")
        .desc("Write bandwidth from this memory (bytes/s)")
        .prereq(bytesWritten)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwWrite.subname(i, system()->getMasterName(i));
    }
    bwTotal
        .name(name() + ".bw_total")
        .desc("Total bandwidth to/from this memory (bytes/s)")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwTotal.subname(i, system()->getMasterName(i));
    }

    // Derived bandwidths: bytes divided by elapsed simulated seconds.
    bwRead = bytesRead / simSeconds;
    bwInstRead = bytesInstRead / simSeconds;
    bwWrite = bytesWritten / simSeconds;
    bwTotal = (bytesRead + bytesWritten) / simSeconds;
}
202 AbstractMemory::getAddrRange() const
// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LLSC flag is set.
void
AbstractMemory::trackLoadLocked(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc. Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}
// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.  Returns true if the write should proceed.
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool allowStore = !isLLSC;

    // Iterate over list. Note that there could be multiple matching records,
    // as more than one context could have done a load locked to this location.
    // Only remove records when we succeed in finding a record for (xc, addr);
    // then, remove all records with this address. Failed store-conditionals do
    // not blow unrelated reservations.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    if (isLLSC) {
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory system can
                // tell, the requesting context's lock is still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                allowStore = true;
                break;
            }
            // If we didn't find a match, keep searching!  Someone else may well
            // have a reservation on this line here but we may find ours in just
            // a moment.
            i++;
        }
        req->setExtraData(allowStore ? 1 : 0);
    }

    // LLSCs that succeeded AND non-LLSC stores both fall into here:
    if (allowStore) {
        // We write address paddr.  However, there may be several entries with a
        // reservation on this address (for other contextIds) and they must all
        // be removed.
        i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr) {
                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                        i->contextId, paddr);
                ContextID owner_cid = i->contextId;
                assert(owner_cid != InvalidContextID);
                ContextID requester_cid = req->hasContextId() ?
                                          req->contextId() :
                                          InvalidContextID;
                if (owner_cid != requester_cid) {
                    // Invalidate another context's exclusive monitor when its
                    // reservation is clobbered by this write.
                    ThreadContext* ctx = system()->getThreadContext(owner_cid);
                    TheISA::globalClearExclusive(ctx);
                }
                i = lockedAddrList.erase(i);
            } else {
                i++;
            }
        }
    }

    return allowStore;
}
#if TRACING_ON
// Emit a MemoryAccess debug trace line for the given packet.  Accesses
// of power-of-two size up to 8 bytes print the data value inline (where
// the ISA allows reading it); everything else prints a hex dump.
static inline void
tracePacket(System *sys, const char *label, PacketPtr pkt)
{
    int size = pkt->getSize();
#if THE_ISA != NULL_ISA
    if (size == 1 || size == 2 || size == 4 || size == 8) {
        DPRINTF(MemoryAccess,"%s from %s of size %i on address %#x data "
                "%#x %c\n", label, sys->getMasterName(pkt->req->masterId()),
                size, pkt->getAddr(), pkt->getUintX(TheISA::GuestByteOrder),
                pkt->req->isUncacheable() ? 'U' : 'C');
        return;
    }
#endif
    DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
            label, sys->getMasterName(pkt->req->masterId()),
            size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
    DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}

#   define TRACE_PACKET(A) tracePacket(system(), A, pkt)
#else
// Tracing compiled out: TRACE_PACKET expands to nothing.
#   define TRACE_PACKET(A)
#endif
// Perform a timing/atomic access: move data between the packet and the
// backing store, update statistics, handle swaps and LL/SC tracking, and
// turn the packet into a response if one is needed.
void
AbstractMemory::access(PacketPtr pkt)
{
    if (pkt->cacheResponding()) {
        // A cache has claimed this request; the memory must stay quiet.
        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
                pkt->getAddr());
        return;
    }

    if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
        // Clean (un-dirtied) evictions carry no data to commit.
        DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
                pkt->getAddr());
        return;
    }

    assert(pkt->getAddrRange().isSubset(range));

    // Host address corresponding to the packet's simulated address.
    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();

    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            if (pmemAddr) {
                // Return the old value, then apply the atomic op in place.
                pkt->setData(hostAddr);
                (*(pkt->getAtomicOp()))(hostAddr);
            }
        } else {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            uint64_t condition_val64;
            uint32_t condition_val32;

            panic_if(!pmemAddr, "Swap only works if there is real memory " \
                     "(i.e. null=False)");

            bool overwrite_mem = true;
            // keep a copy of our possible write value, and copy what is at the
            // memory address into the packet
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(hostAddr);

            if (pkt->req->isCondSwap()) {
                // Conditional swap: only overwrite when the current memory
                // contents match the expected value carried in extra data.
                if (pkt->getSize() == sizeof(uint64_t)) {
                    condition_val64 = pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                                 sizeof(uint64_t));
                } else if (pkt->getSize() == sizeof(uint32_t)) {
                    condition_val32 = (uint32_t)pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                                 sizeof(uint32_t));
                } else
                    panic("Invalid size for conditional read/write\n");
            }

            if (overwrite_mem)
                std::memcpy(hostAddr, &overwrite_val[0], pkt->getSize());

            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Read/Write");
            numOther[pkt->req->masterId()]++;
        }
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            assert(!pkt->fromCache());
            // if the packet is not coming from a cache then we have
            // to do the LL/SC tracking here
            trackLoadLocked(pkt);
        }
        if (pmemAddr) {
            pkt->setData(hostAddr);
        }
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        numReads[pkt->req->masterId()]++;
        bytesRead[pkt->req->masterId()] += pkt->getSize();
        if (pkt->req->isInstFetch())
            bytesInstRead[pkt->req->masterId()] += pkt->getSize();
    } else if (pkt->isInvalidate() || pkt->isClean()) {
        assert(!pkt->isWrite());
        // in a fastmem system invalidating and/or cleaning packets
        // can be seen due to cache maintenance requests

        // no need to do anything
    } else if (pkt->isWrite()) {
        // writeOK checks the LL/SC reservation list; a failed store
        // conditional must not modify memory.
        if (writeOK(pkt)) {
            if (pmemAddr) {
                pkt->writeData(hostAddr);
                DPRINTF(MemoryAccess, "%s wrote %i bytes to address %x\n",
                        __func__, pkt->getSize(), pkt->getAddr());
            }
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            numWrites[pkt->req->masterId()]++;
            bytesWritten[pkt->req->masterId()] += pkt->getSize();
        }
    } else {
        panic("Unexpected packet %s", pkt->print());
    }

    if (pkt->needsResponse()) {
        pkt->makeResponse();
    }
}
// Perform a debug/functional access: read or write the backing store
// immediately with no timing, statistics, or LL/SC side effects.  Also
// services Print requests used by the memory-system dump machinery.
void
AbstractMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddrRange().isSubset(range));

    // Host address corresponding to the packet's simulated address.
    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();

    if (pkt->isRead()) {
        if (pmemAddr) {
            pkt->setData(hostAddr);
        }
        TRACE_PACKET("Read");
        pkt->makeResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr) {
            pkt->writeData(hostAddr);
        }
        TRACE_PACKET("Write");
        pkt->makeResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        assert(prs);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("AbstractMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}