base, sim, mem, arch: Remove the dummy CPU in NULL
[gem5.git] / src / mem / abstract_mem.cc
1 /*
2 * Copyright (c) 2010-2012,2017-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41 #include "mem/abstract_mem.hh"
42
43 #include <vector>
44
45 #include "arch/locked_mem.hh"
46 #include "base/loader/memory_image.hh"
47 #include "base/loader/object_file.hh"
48 #include "cpu/thread_context.hh"
49 #include "debug/LLSC.hh"
50 #include "debug/MemoryAccess.hh"
51 #include "mem/packet_access.hh"
52 #include "sim/system.hh"
53
54 using namespace std;
55
56 AbstractMemory::AbstractMemory(const Params *p) :
57 ClockedObject(p), range(params()->range), pmemAddr(NULL),
58 backdoor(params()->range, nullptr,
59 (MemBackdoor::Flags)(MemBackdoor::Readable |
60 MemBackdoor::Writeable)),
61 confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
62 kvmMap(p->kvm_map), _system(NULL),
63 stats(*this)
64 {
65 panic_if(!range.valid() || !range.size(),
66 "Memory range %s must be valid with non-zero size.",
67 range.to_string());
68 }
69
70 void
71 AbstractMemory::initState()
72 {
73 ClockedObject::initState();
74
75 const auto &file = params()->image_file;
76 if (file == "")
77 return;
78
79 auto *object = Loader::createObjectFile(file, true);
80 fatal_if(!object, "%s: Could not load %s.", name(), file);
81
82 Loader::debugSymbolTable.insert(*object->symtab().globals());
83 Loader::MemoryImage image = object->buildImage();
84
85 AddrRange image_range(image.minAddr(), image.maxAddr());
86 if (!range.contains(image_range.start())) {
87 warn("%s: Moving image from %s to memory address range %s.",
88 name(), image_range.to_string(), range.to_string());
89 image = image.offset(range.start());
90 image_range = AddrRange(image.minAddr(), image.maxAddr());
91 }
92 panic_if(!image_range.isSubset(range), "%s: memory image %s doesn't fit.",
93 name(), file);
94
95 PortProxy proxy([this](PacketPtr pkt) { functionalAccess(pkt); }, size());
96
97 panic_if(!image.write(proxy), "%s: Unable to write image.");
98 }
99
100 void
101 AbstractMemory::setBackingStore(uint8_t* pmem_addr)
102 {
103 // If there was an existing backdoor, let everybody know it's going away.
104 if (backdoor.ptr())
105 backdoor.invalidate();
106
107 // The back door can't handle interleaved memory.
108 backdoor.ptr(range.interleaved() ? nullptr : pmem_addr);
109
110 pmemAddr = pmem_addr;
111 }
112
// Register all per-memory statistics with the stats framework. The
// per-requestor vector initialization and bandwidth formulas are set up
// later in regStats(), once the system's requestor count is known.
AbstractMemory::MemStats::MemStats(AbstractMemory &_mem)
    : Stats::Group(&_mem), mem(_mem),
      bytesRead(this, "bytes_read",
                "Number of bytes read from this memory"),
      bytesInstRead(this, "bytes_inst_read",
                    "Number of instructions bytes read from this memory"),
      bytesWritten(this, "bytes_written",
                   "Number of bytes written to this memory"),
      numReads(this, "num_reads",
               "Number of read requests responded to by this memory"),
      numWrites(this, "num_writes",
                "Number of write requests responded to by this memory"),
      numOther(this, "num_other",
               "Number of other requests responded to by this memory"),
      bwRead(this, "bw_read",
             "Total read bandwidth from this memory (bytes/s)"),
      bwInstRead(this, "bw_inst_read",
                 "Instruction read bandwidth from this memory (bytes/s)"),
      bwWrite(this, "bw_write",
              "Write bandwidth from this memory (bytes/s)"),
      bwTotal(this, "bw_total",
              "Total bandwidth to/from this memory (bytes/s)")
{
}
137
138 void
139 AbstractMemory::MemStats::regStats()
140 {
141 using namespace Stats;
142
143 Stats::Group::regStats();
144
145 System *sys = mem.system();
146 assert(sys);
147 const auto max_requestors = sys->maxRequestors();
148
149 bytesRead
150 .init(max_requestors)
151 .flags(total | nozero | nonan)
152 ;
153 for (int i = 0; i < max_requestors; i++) {
154 bytesRead.subname(i, sys->getRequestorName(i));
155 }
156
157 bytesInstRead
158 .init(max_requestors)
159 .flags(total | nozero | nonan)
160 ;
161 for (int i = 0; i < max_requestors; i++) {
162 bytesInstRead.subname(i, sys->getRequestorName(i));
163 }
164
165 bytesWritten
166 .init(max_requestors)
167 .flags(total | nozero | nonan)
168 ;
169 for (int i = 0; i < max_requestors; i++) {
170 bytesWritten.subname(i, sys->getRequestorName(i));
171 }
172
173 numReads
174 .init(max_requestors)
175 .flags(total | nozero | nonan)
176 ;
177 for (int i = 0; i < max_requestors; i++) {
178 numReads.subname(i, sys->getRequestorName(i));
179 }
180
181 numWrites
182 .init(max_requestors)
183 .flags(total | nozero | nonan)
184 ;
185 for (int i = 0; i < max_requestors; i++) {
186 numWrites.subname(i, sys->getRequestorName(i));
187 }
188
189 numOther
190 .init(max_requestors)
191 .flags(total | nozero | nonan)
192 ;
193 for (int i = 0; i < max_requestors; i++) {
194 numOther.subname(i, sys->getRequestorName(i));
195 }
196
197 bwRead
198 .precision(0)
199 .prereq(bytesRead)
200 .flags(total | nozero | nonan)
201 ;
202 for (int i = 0; i < max_requestors; i++) {
203 bwRead.subname(i, sys->getRequestorName(i));
204 }
205
206 bwInstRead
207 .precision(0)
208 .prereq(bytesInstRead)
209 .flags(total | nozero | nonan)
210 ;
211 for (int i = 0; i < max_requestors; i++) {
212 bwInstRead.subname(i, sys->getRequestorName(i));
213 }
214
215 bwWrite
216 .precision(0)
217 .prereq(bytesWritten)
218 .flags(total | nozero | nonan)
219 ;
220 for (int i = 0; i < max_requestors; i++) {
221 bwWrite.subname(i, sys->getRequestorName(i));
222 }
223
224 bwTotal
225 .precision(0)
226 .prereq(bwTotal)
227 .flags(total | nozero | nonan)
228 ;
229 for (int i = 0; i < max_requestors; i++) {
230 bwTotal.subname(i, sys->getRequestorName(i));
231 }
232
233 bwRead = bytesRead / simSeconds;
234 bwInstRead = bytesInstRead / simSeconds;
235 bwWrite = bytesWritten / simSeconds;
236 bwTotal = (bytesRead + bytesWritten) / simSeconds;
237 }
238
// Return the address range this memory responds to (set from the
// "range" parameter at construction).
AddrRange
AbstractMemory::getAddrRange() const
{
    return range;
}
244
245 // Add load-locked to tracking list. Should only be called if the
246 // operation is a load and the LLSC flag is set.
247 void
248 AbstractMemory::trackLoadLocked(PacketPtr pkt)
249 {
250 const RequestPtr &req = pkt->req;
251 Addr paddr = LockedAddr::mask(req->getPaddr());
252
253 // first we check if we already have a locked addr for this
254 // xc. Since each xc only gets one, we just update the
255 // existing record with the new address.
256 list<LockedAddr>::iterator i;
257
258 for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
259 if (i->matchesContext(req)) {
260 DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
261 req->contextId(), paddr);
262 i->addr = paddr;
263 return;
264 }
265 }
266
267 // no record for this xc: need to allocate a new one
268 DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
269 req->contextId(), paddr);
270 lockedAddrList.push_front(LockedAddr(req));
271 }
272
273
// Called on *writes* only... both regular stores and
// store-conditional operations. Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
//
// Returns true if the store is allowed to proceed (always true for
// ordinary stores; for store-conditionals, true only when this
// context's reservation on the address is still live). The SC outcome
// is also communicated back to the requester via req->setExtraData().
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool allowStore = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching records,
    // as more than one context could have done a load locked to this location.
    // Only remove records when we succeed in finding a record for (xc, addr);
    // then, remove all records with this address.  Failed store-conditionals do
    // not blow unrelated reservations.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    if (isLLSC) {
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory system can
                // tell, the requesting context's lock is still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                allowStore = true;
                break;
            }
            // If we didn't find a match, keep searching!  Someone else may well
            // have a reservation on this line here but we may find ours in just
            // a little while.
            i++;
        }
        // 1 = SC succeeded, 0 = SC failed; the requester reads this
        // back as the store-conditional's result value.
        req->setExtraData(allowStore ? 1 : 0);
    }
    // LLSCs that succeeded AND non-LLSC stores both fall into here:
    if (allowStore) {
        // We write address paddr.  However, there may be several entries with a
        // reservation on this address (for other contextIds) and they must all
        // be removed.
        i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr) {
                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                        i->contextId, paddr);
                ContextID owner_cid = i->contextId;
                assert(owner_cid != InvalidContextID);
                ContextID requestor_cid = req->hasContextId() ?
                                           req->contextId() :
                                           InvalidContextID;
                // Someone else's reservation is being clobbered by this
                // store: notify that context's ISA layer so its local
                // exclusive-monitor state is cleared too.
                if (owner_cid != requestor_cid) {
                    ThreadContext* ctx = system()->threads[owner_cid];
                    TheISA::globalClearExclusive(ctx);
                }
                // erase() returns the next valid iterator, keeping the
                // traversal safe while removing in place.
                i = lockedAddrList.erase(i);
            } else {
                i++;
            }
        }
    }

    return allowStore;
}
342
#if TRACING_ON
// Emit a MemoryAccess debug line for a packet, including the data value
// for power-of-two sizes up to 8 bytes (when an ISA is available to
// define guest byte order). Compiled out entirely when tracing is off.
static inline void
tracePacket(System *sys, const char *label, PacketPtr pkt)
{
    int size = pkt->getSize();
#if THE_ISA != NULL_ISA
    if (size == 1 || size == 2 || size == 4 || size == 8) {
        ByteOrder byte_order = sys->getGuestByteOrder();
        // Fix: the argument list previously repeated "size,
        // pkt->getAddr()," leaving two extra arguments beyond the six
        // conversions in the format string.
        DPRINTF(MemoryAccess,"%s from %s of size %i on address %#x data "
                "%#x %c\n", label, sys->getRequestorName(pkt->req->
                requestorId()), size, pkt->getAddr(),
                pkt->getUintX(byte_order),
                pkt->req->isUncacheable() ? 'U' : 'C');
        return;
    }
#endif
    DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
            label, sys->getRequestorName(pkt->req->requestorId()),
            size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
    DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}

#   define TRACE_PACKET(A) tracePacket(system(), A, pkt)
#else
#   define TRACE_PACKET(A)
#endif
369
// Handle a timing/atomic access to this memory: perform the read,
// write, swap or LL/SC side effects against the backing store (when
// one exists), update the per-requestor statistics, and turn the
// packet into a response if one is needed.
void
AbstractMemory::access(PacketPtr pkt)
{
    // A cache elsewhere has claimed the response; stay silent so the
    // requester does not see two responses.
    if (pkt->cacheResponding()) {
        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
                pkt->getAddr());
        return;
    }

    // Clean evictions/writebacks carry no data the memory needs and
    // expect no response.
    if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
        DPRINTF(MemoryAccess, "CleanEvict  on 0x%x: not responding\n",
                pkt->getAddr());
        return;
    }

    assert(pkt->getAddrRange().isSubset(range));

    // Translate the packet's physical address into a pointer into the
    // host backing store. Note: only dereferenced when pmemAddr is set
    // (i.e. the memory is not configured as null).
    uint8_t *host_addr = toHostAddr(pkt->getAddr());

    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // Atomic read-modify-write: return the old value in the
            // packet, then apply the op in place on host memory.
            if (pmemAddr) {
                pkt->setData(host_addr);
                (*(pkt->getAtomicOp()))(host_addr);
            }
        } else {
            // (Conditional) swap: requires real backing memory since
            // the old contents must be read and compared.
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            uint64_t condition_val64;
            uint32_t condition_val32;

            panic_if(!pmemAddr, "Swap only works if there is real memory " \
                     "(i.e. null=False)");

            bool overwrite_mem = true;
            // keep a copy of our possible write value, and copy what is at the
            // memory address into the packet
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(host_addr);

            // For a conditional swap, only overwrite when the current
            // memory contents match the expected value carried in the
            // request's extra data (64- or 32-bit only).
            if (pkt->req->isCondSwap()) {
                if (pkt->getSize() == sizeof(uint64_t)) {
                    condition_val64 = pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val64, host_addr,
                                                 sizeof(uint64_t));
                } else if (pkt->getSize() == sizeof(uint32_t)) {
                    condition_val32 = (uint32_t)pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val32, host_addr,
                                                 sizeof(uint32_t));
                } else
                    panic("Invalid size for conditional read/write\n");
            }

            if (overwrite_mem)
                std::memcpy(host_addr, &overwrite_val[0], pkt->getSize());

            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Read/Write");
            stats.numOther[pkt->req->requestorId()]++;
        }
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            assert(!pkt->fromCache());
            // if the packet is not coming from a cache then we have
            // to do the LL/SC tracking here
            trackLoadLocked(pkt);
        }
        if (pmemAddr) {
            pkt->setData(host_addr);
        }
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        stats.numReads[pkt->req->requestorId()]++;
        stats.bytesRead[pkt->req->requestorId()] += pkt->getSize();
        if (pkt->req->isInstFetch())
            stats.bytesInstRead[pkt->req->requestorId()] += pkt->getSize();
    } else if (pkt->isInvalidate() || pkt->isClean()) {
        assert(!pkt->isWrite());
        // in a fastmem system invalidating and/or cleaning packets
        // can be seen due to cache maintenance requests

        // no need to do anything
    } else if (pkt->isWrite()) {
        // writeOK() gates the write on LL/SC bookkeeping (failed store
        // conditionals and conflicting reservations).
        if (writeOK(pkt)) {
            if (pmemAddr) {
                pkt->writeData(host_addr);
                DPRINTF(MemoryAccess, "%s write due to %s\n",
                        __func__, pkt->print());
            }
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            stats.numWrites[pkt->req->requestorId()]++;
            stats.bytesWritten[pkt->req->requestorId()] += pkt->getSize();
        }
    } else {
        panic("Unexpected packet %s", pkt->print());
    }

    if (pkt->needsResponse()) {
        pkt->makeResponse();
    }
}
471
472 void
473 AbstractMemory::functionalAccess(PacketPtr pkt)
474 {
475 assert(pkt->getAddrRange().isSubset(range));
476
477 uint8_t *host_addr = toHostAddr(pkt->getAddr());
478
479 if (pkt->isRead()) {
480 if (pmemAddr) {
481 pkt->setData(host_addr);
482 }
483 TRACE_PACKET("Read");
484 pkt->makeResponse();
485 } else if (pkt->isWrite()) {
486 if (pmemAddr) {
487 pkt->writeData(host_addr);
488 }
489 TRACE_PACKET("Write");
490 pkt->makeResponse();
491 } else if (pkt->isPrint()) {
492 Packet::PrintReqState *prs =
493 dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
494 assert(prs);
495 // Need to call printLabels() explicitly since we're not going
496 // through printObj().
497 prs->printLabels();
498 // Right now we just print the single byte at the specified address.
499 ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *host_addr);
500 } else {
501 panic("AbstractMemory: unimplemented functional command %s",
502 pkt->cmdString());
503 }
504 }