/*
 * Copyright (c) 2010-2012,2017-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/abstract_mem.hh"

#include <vector>

#include "arch/locked_mem.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "mem/packet_access.hh"
#include "sim/system.hh"

using namespace std;

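// The backdoor is constructed to cover the memory's entire range with
// read/write permission, but no host pointer is installed until
// setBackingStore() is called (and only if the range is not interleaved).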
AbstractMemory::AbstractMemory(const Params *p) :
    ClockedObject(p), range(params()->range), pmemAddr(NULL),
    backdoor(params()->range, nullptr,
             (MemBackdoor::Flags)(MemBackdoor::Readable |
                                  MemBackdoor::Writeable)),
    confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
    kvmMap(p->kvm_map), _system(NULL),
    stats(*this)
{
}

void
AbstractMemory::init()
{
    assert(system());

    if (size() % _system->getPageBytes() != 0)
        panic("Memory size is not divisible by the page size\n");
}

void
AbstractMemory::setBackingStore(uint8_t* pmem_addr)
{
    // If there was an existing backdoor, let everybody know it's going away.
    if (backdoor.ptr())
        backdoor.invalidate();

    // The back door can't handle interleaved memory.
    backdoor.ptr(range.interleaved() ? nullptr : pmem_addr);

    pmemAddr = pmem_addr;
}

AbstractMemory::MemStats::MemStats(AbstractMemory &_mem)
    : Stats::Group(&_mem), mem(_mem),
    bytesRead(this, "bytes_read",
              "Number of bytes read from this memory"),
    bytesInstRead(this, "bytes_inst_read",
                  "Number of instruction bytes read from this memory"),
    bytesWritten(this, "bytes_written",
                 "Number of bytes written to this memory"),
    numReads(this, "num_reads",
             "Number of read requests responded to by this memory"),
    numWrites(this, "num_writes",
              "Number of write requests responded to by this memory"),
    numOther(this, "num_other",
             "Number of other requests responded to by this memory"),
    bwRead(this, "bw_read",
           "Total read bandwidth from this memory (bytes/s)"),
    bwInstRead(this, "bw_inst_read",
               "Instruction read bandwidth from this memory (bytes/s)"),
    bwWrite(this, "bw_write",
            "Write bandwidth from this memory (bytes/s)"),
    bwTotal(this, "bw_total",
            "Total bandwidth to/from this memory (bytes/s)")
{
}

void
AbstractMemory::MemStats::regStats()
{
    using namespace Stats;

    Stats::Group::regStats();

    System *sys = mem.system();
    assert(sys);
    const auto max_masters = sys->maxMasters();

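    // Each statistic below is a per-master vector: one entry for every
    // master (requestor) registered with the system, subnamed after it.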
    bytesRead
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bytesRead.subname(i, sys->getMasterName(i));
    }

    bytesInstRead
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bytesInstRead.subname(i, sys->getMasterName(i));
    }

    bytesWritten
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bytesWritten.subname(i, sys->getMasterName(i));
    }

    numReads
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        numReads.subname(i, sys->getMasterName(i));
    }

    numWrites
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        numWrites.subname(i, sys->getMasterName(i));
    }

    numOther
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        numOther.subname(i, sys->getMasterName(i));
    }

    bwRead
        .precision(0)
        .prereq(bytesRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bwRead.subname(i, sys->getMasterName(i));
    }

    bwInstRead
        .precision(0)
        .prereq(bytesInstRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bwInstRead.subname(i, sys->getMasterName(i));
    }

    bwWrite
        .precision(0)
        .prereq(bytesWritten)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bwWrite.subname(i, sys->getMasterName(i));
    }

    bwTotal
        .precision(0)
        .prereq(bwTotal)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bwTotal.subname(i, sys->getMasterName(i));
    }

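    // The bandwidth statistics are formulas: they are evaluated from the
    // byte counters and the elapsed simulated time at stats-dump time.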
    bwRead = bytesRead / simSeconds;
    bwInstRead = bytesInstRead / simSeconds;
    bwWrite = bytesWritten / simSeconds;
    bwTotal = (bytesRead + bytesWritten) / simSeconds;
}

AddrRange
AbstractMemory::getAddrRange() const
{
    return range;
}

// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LLSC flag is set.
void
AbstractMemory::trackLoadLocked(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool allowStore = !isLLSC;

    // Iterate over the list.  There may be multiple matching records, as
    // more than one context can hold a load-locked reservation on this
    // location.  Only remove records once a record for (xc, addr) is found;
    // then remove all records with this address.  Failed store-conditionals
    // do not clear unrelated reservations.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    if (isLLSC) {
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory system
                // can tell, the requesting context's lock is still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                allowStore = true;
                break;
            }
            // If we didn't find a match, keep searching!  Another context
            // may hold a reservation on this address, and ours may still
            // appear later in the list.
            i++;
        }
        req->setExtraData(allowStore ? 1 : 0);
    }
    // LLSCs that succeeded AND non-LLSC stores both fall into here:
    if (allowStore) {
        // We write address paddr.  However, there may be several entries
        // with a reservation on this address (for other contextIds) and
        // they must all be removed.
        i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr) {
                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                        i->contextId, paddr);
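                // If the write comes from a context other than the one
                // holding this reservation, let the owning context know
                // that its exclusive monitor has been cleared.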
                ContextID owner_cid = i->contextId;
                assert(owner_cid != InvalidContextID);
                ContextID requester_cid = req->hasContextId() ?
                                           req->contextId() :
                                           InvalidContextID;
                if (owner_cid != requester_cid) {
                    ThreadContext* ctx = system()->getThreadContext(owner_cid);
                    TheISA::globalClearExclusive(ctx);
                }
                i = lockedAddrList.erase(i);
            } else {
                i++;
            }
        }
    }

    return allowStore;
}

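// Helper behind the TRACE_PACKET macro: prints a MemoryAccess debug line
// for each access, with the data inlined for sizes of 1, 2, 4, or 8 bytes
// and as a hex dump otherwise.  Compiled out when tracing is disabled.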
#if TRACING_ON
static inline void
tracePacket(System *sys, const char *label, PacketPtr pkt)
{
    int size = pkt->getSize();
#if THE_ISA != NULL_ISA
    if (size == 1 || size == 2 || size == 4 || size == 8) {
        DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x data "
                "%#x %c\n", label, sys->getMasterName(pkt->req->masterId()),
                size, pkt->getAddr(), pkt->getUintX(TheISA::GuestByteOrder),
                pkt->req->isUncacheable() ? 'U' : 'C');
        return;
    }
#endif
    DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
            label, sys->getMasterName(pkt->req->masterId()),
            size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
    DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}

#   define TRACE_PACKET(A) tracePacket(system(), A, pkt)
#else
#   define TRACE_PACKET(A)
#endif

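// Handle an atomic or timing-mode access: update the backing store (if
// any), maintain LL/SC reservations, record per-master statistics, and
// convert the packet into a response when one is required.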
void
AbstractMemory::access(PacketPtr pkt)
{
    if (pkt->cacheResponding()) {
        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
                pkt->getAddr());
        return;
    }

    if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
        DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
                pkt->getAddr());
        return;
    }

    assert(pkt->getAddrRange().isSubset(range));

    uint8_t *host_addr = toHostAddr(pkt->getAddr());

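    // SwapReq covers both (conditional) swaps and generic atomic
    // operations (AMOs); in either case the old memory contents are
    // copied back into the packet.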
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            if (pmemAddr) {
                pkt->setData(host_addr);
                (*(pkt->getAtomicOp()))(host_addr);
            }
        } else {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            uint64_t condition_val64;
            uint32_t condition_val32;

            panic_if(!pmemAddr, "Swap only works if there is real memory "
                     "(i.e. null=False)");

            bool overwrite_mem = true;
            // keep a copy of our possible write value, and copy what is
            // at the memory address into the packet
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(host_addr);

            if (pkt->req->isCondSwap()) {
                if (pkt->getSize() == sizeof(uint64_t)) {
                    condition_val64 = pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val64, host_addr,
                                                 sizeof(uint64_t));
                } else if (pkt->getSize() == sizeof(uint32_t)) {
                    condition_val32 = (uint32_t)pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val32, host_addr,
                                                 sizeof(uint32_t));
                } else
                    panic("Invalid size for conditional read/write\n");
            }

            if (overwrite_mem)
                std::memcpy(host_addr, &overwrite_val[0], pkt->getSize());

            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Read/Write");
            stats.numOther[pkt->req->masterId()]++;
        }
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            assert(!pkt->fromCache());
            // if the packet is not coming from a cache then we have
            // to do the LL/SC tracking here
            trackLoadLocked(pkt);
        }
        if (pmemAddr) {
            pkt->setData(host_addr);
        }
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        stats.numReads[pkt->req->masterId()]++;
        stats.bytesRead[pkt->req->masterId()] += pkt->getSize();
        if (pkt->req->isInstFetch())
            stats.bytesInstRead[pkt->req->masterId()] += pkt->getSize();
    } else if (pkt->isInvalidate() || pkt->isClean()) {
        assert(!pkt->isWrite());
        // in a fastmem system invalidating and/or cleaning packets
        // can be seen due to cache maintenance requests

        // no need to do anything
    } else if (pkt->isWrite()) {
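        // writeOK() consults the locked-address list above; a failed
        // store-conditional skips the write (and the stats update) entirely.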
        if (writeOK(pkt)) {
            if (pmemAddr) {
                pkt->writeData(host_addr);
                DPRINTF(MemoryAccess, "%s write due to %s\n",
                        __func__, pkt->print());
            }
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            stats.numWrites[pkt->req->masterId()]++;
            stats.bytesWritten[pkt->req->masterId()] += pkt->getSize();
        }
    } else {
        panic("Unexpected packet %s", pkt->print());
    }

    if (pkt->needsResponse()) {
        pkt->makeResponse();
    }
}

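// Functional accesses read or update the backing store immediately and
// do not model timing or update statistics; they serve debugger
// accesses, binary loading, and similar out-of-band operations.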
void
AbstractMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddrRange().isSubset(range));

    uint8_t *host_addr = toHostAddr(pkt->getAddr());

    if (pkt->isRead()) {
        if (pmemAddr) {
            pkt->setData(host_addr);
        }
        TRACE_PACKET("Read");
        pkt->makeResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr) {
            pkt->writeData(host_addr);
        }
        TRACE_PACKET("Write");
        pkt->makeResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        assert(prs);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *host_addr);
    } else {
        panic("AbstractMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}