arch,base,cpu,sim: Statically allocate debugSymbolTable.
[gem5.git] / src / mem / abstract_mem.cc
1 /*
2 * Copyright (c) 2010-2012,2017-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41 #include "mem/abstract_mem.hh"
42
43 #include <vector>
44
45 #include "arch/locked_mem.hh"
46 #include "base/loader/memory_image.hh"
47 #include "base/loader/object_file.hh"
48 #include "cpu/base.hh"
49 #include "cpu/thread_context.hh"
50 #include "debug/LLSC.hh"
51 #include "debug/MemoryAccess.hh"
52 #include "mem/packet_access.hh"
53 #include "sim/system.hh"
54
55 using namespace std;
56
// Construct the memory. The backdoor is created covering the full
// address range with read+write permission, but with a null host
// pointer until setBackingStore() supplies one. pmemAddr likewise
// stays NULL until a backing store is attached.
AbstractMemory::AbstractMemory(const Params *p) :
    ClockedObject(p), range(params()->range), pmemAddr(NULL),
    backdoor(params()->range, nullptr,
             (MemBackdoor::Flags)(MemBackdoor::Readable |
                                  MemBackdoor::Writeable)),
    confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
    kvmMap(p->kvm_map), _system(NULL),
    stats(*this)
{
    // A degenerate (invalid or zero-sized) range would make every later
    // isSubset()/contains() check meaningless, so fail fast here.
    panic_if(!range.valid() || !range.size(),
             "Memory range %s must be valid with non-zero size.",
             range.to_string());
}
70
71 void
72 AbstractMemory::initState()
73 {
74 ClockedObject::initState();
75
76 const auto &file = params()->image_file;
77 if (file == "")
78 return;
79
80 auto *object = Loader::createObjectFile(file, true);
81 fatal_if(!object, "%s: Could not load %s.", name(), file);
82
83 panic_if(!object->loadGlobalSymbols(&Loader::debugSymbolTable),
84 "%s: Could not load symbols from %s.", name(), file);
85
86 Loader::MemoryImage image = object->buildImage();
87
88 AddrRange image_range(image.minAddr(), image.maxAddr());
89 if (!range.contains(image_range.start())) {
90 warn("%s: Moving image from %s to memory address range %s.",
91 name(), image_range.to_string(), range.to_string());
92 image = image.offset(range.start());
93 image_range = AddrRange(image.minAddr(), image.maxAddr());
94 }
95 panic_if(!image_range.isSubset(range), "%s: memory image %s doesn't fit.",
96 name(), file);
97
98 PortProxy proxy([this](PacketPtr pkt) { functionalAccess(pkt); }, size());
99
100 panic_if(!image.write(proxy), "%s: Unable to write image.");
101 }
102
103 void
104 AbstractMemory::setBackingStore(uint8_t* pmem_addr)
105 {
106 // If there was an existing backdoor, let everybody know it's going away.
107 if (backdoor.ptr())
108 backdoor.invalidate();
109
110 // The back door can't handle interleaved memory.
111 backdoor.ptr(range.interleaved() ? nullptr : pmem_addr);
112
113 pmemAddr = pmem_addr;
114 }
115
// Register this memory's statistics group. Only names/descriptions are
// set here; per-master sizing and subnames need the System (for
// maxMasters()), which isn't available yet, so that happens in
// regStats() instead.
AbstractMemory::MemStats::MemStats(AbstractMemory &_mem)
    : Stats::Group(&_mem), mem(_mem),
    bytesRead(this, "bytes_read",
              "Number of bytes read from this memory"),
    bytesInstRead(this, "bytes_inst_read",
                  "Number of instructions bytes read from this memory"),
    bytesWritten(this, "bytes_written",
                 "Number of bytes written to this memory"),
    numReads(this, "num_reads",
             "Number of read requests responded to by this memory"),
    numWrites(this, "num_writes",
              "Number of write requests responded to by this memory"),
    numOther(this, "num_other",
             "Number of other requests responded to by this memory"),
    bwRead(this, "bw_read",
           "Total read bandwidth from this memory (bytes/s)"),
    bwInstRead(this, "bw_inst_read",
               "Instruction read bandwidth from this memory (bytes/s)"),
    bwWrite(this, "bw_write",
            "Write bandwidth from this memory (bytes/s)"),
    bwTotal(this, "bw_total",
            "Total bandwidth to/from this memory (bytes/s)")
{
}
140
141 void
142 AbstractMemory::MemStats::regStats()
143 {
144 using namespace Stats;
145
146 Stats::Group::regStats();
147
148 System *sys = mem.system();
149 assert(sys);
150 const auto max_masters = sys->maxMasters();
151
152 bytesRead
153 .init(max_masters)
154 .flags(total | nozero | nonan)
155 ;
156 for (int i = 0; i < max_masters; i++) {
157 bytesRead.subname(i, sys->getMasterName(i));
158 }
159
160 bytesInstRead
161 .init(max_masters)
162 .flags(total | nozero | nonan)
163 ;
164 for (int i = 0; i < max_masters; i++) {
165 bytesInstRead.subname(i, sys->getMasterName(i));
166 }
167
168 bytesWritten
169 .init(max_masters)
170 .flags(total | nozero | nonan)
171 ;
172 for (int i = 0; i < max_masters; i++) {
173 bytesWritten.subname(i, sys->getMasterName(i));
174 }
175
176 numReads
177 .init(max_masters)
178 .flags(total | nozero | nonan)
179 ;
180 for (int i = 0; i < max_masters; i++) {
181 numReads.subname(i, sys->getMasterName(i));
182 }
183
184 numWrites
185 .init(max_masters)
186 .flags(total | nozero | nonan)
187 ;
188 for (int i = 0; i < max_masters; i++) {
189 numWrites.subname(i, sys->getMasterName(i));
190 }
191
192 numOther
193 .init(max_masters)
194 .flags(total | nozero | nonan)
195 ;
196 for (int i = 0; i < max_masters; i++) {
197 numOther.subname(i, sys->getMasterName(i));
198 }
199
200 bwRead
201 .precision(0)
202 .prereq(bytesRead)
203 .flags(total | nozero | nonan)
204 ;
205 for (int i = 0; i < max_masters; i++) {
206 bwRead.subname(i, sys->getMasterName(i));
207 }
208
209 bwInstRead
210 .precision(0)
211 .prereq(bytesInstRead)
212 .flags(total | nozero | nonan)
213 ;
214 for (int i = 0; i < max_masters; i++) {
215 bwInstRead.subname(i, sys->getMasterName(i));
216 }
217
218 bwWrite
219 .precision(0)
220 .prereq(bytesWritten)
221 .flags(total | nozero | nonan)
222 ;
223 for (int i = 0; i < max_masters; i++) {
224 bwWrite.subname(i, sys->getMasterName(i));
225 }
226
227 bwTotal
228 .precision(0)
229 .prereq(bwTotal)
230 .flags(total | nozero | nonan)
231 ;
232 for (int i = 0; i < max_masters; i++) {
233 bwTotal.subname(i, sys->getMasterName(i));
234 }
235
236 bwRead = bytesRead / simSeconds;
237 bwInstRead = bytesInstRead / simSeconds;
238 bwWrite = bytesWritten / simSeconds;
239 bwTotal = (bytesRead + bytesWritten) / simSeconds;
240 }
241
// Return the address range this memory responds to.
AddrRange
AbstractMemory::getAddrRange() const
{
    return range;
}
247
248 // Add load-locked to tracking list. Should only be called if the
249 // operation is a load and the LLSC flag is set.
250 void
251 AbstractMemory::trackLoadLocked(PacketPtr pkt)
252 {
253 const RequestPtr &req = pkt->req;
254 Addr paddr = LockedAddr::mask(req->getPaddr());
255
256 // first we check if we already have a locked addr for this
257 // xc. Since each xc only gets one, we just update the
258 // existing record with the new address.
259 list<LockedAddr>::iterator i;
260
261 for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
262 if (i->matchesContext(req)) {
263 DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
264 req->contextId(), paddr);
265 i->addr = paddr;
266 return;
267 }
268 }
269
270 // no record for this xc: need to allocate a new one
271 DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
272 req->contextId(), paddr);
273 lockedAddrList.push_front(LockedAddr(req));
274 }
275
276
277 // Called on *writes* only... both regular stores and
278 // store-conditional operations. Check for conventional stores which
279 // conflict with locked addresses, and for success/failure of store
280 // conditionals.
281 bool
282 AbstractMemory::checkLockedAddrList(PacketPtr pkt)
283 {
284 const RequestPtr &req = pkt->req;
285 Addr paddr = LockedAddr::mask(req->getPaddr());
286 bool isLLSC = pkt->isLLSC();
287
288 // Initialize return value. Non-conditional stores always
289 // succeed. Assume conditional stores will fail until proven
290 // otherwise.
291 bool allowStore = !isLLSC;
292
293 // Iterate over list. Note that there could be multiple matching records,
294 // as more than one context could have done a load locked to this location.
295 // Only remove records when we succeed in finding a record for (xc, addr);
296 // then, remove all records with this address. Failed store-conditionals do
297 // not blow unrelated reservations.
298 list<LockedAddr>::iterator i = lockedAddrList.begin();
299
300 if (isLLSC) {
301 while (i != lockedAddrList.end()) {
302 if (i->addr == paddr && i->matchesContext(req)) {
303 // it's a store conditional, and as far as the memory system can
304 // tell, the requesting context's lock is still valid.
305 DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
306 req->contextId(), paddr);
307 allowStore = true;
308 break;
309 }
310 // If we didn't find a match, keep searching! Someone else may well
311 // have a reservation on this line here but we may find ours in just
312 // a little while.
313 i++;
314 }
315 req->setExtraData(allowStore ? 1 : 0);
316 }
317 // LLSCs that succeeded AND non-LLSC stores both fall into here:
318 if (allowStore) {
319 // We write address paddr. However, there may be several entries with a
320 // reservation on this address (for other contextIds) and they must all
321 // be removed.
322 i = lockedAddrList.begin();
323 while (i != lockedAddrList.end()) {
324 if (i->addr == paddr) {
325 DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
326 i->contextId, paddr);
327 ContextID owner_cid = i->contextId;
328 assert(owner_cid != InvalidContextID);
329 ContextID requester_cid = req->hasContextId() ?
330 req->contextId() :
331 InvalidContextID;
332 if (owner_cid != requester_cid) {
333 ThreadContext* ctx = system()->getThreadContext(owner_cid);
334 TheISA::globalClearExclusive(ctx);
335 }
336 i = lockedAddrList.erase(i);
337 } else {
338 i++;
339 }
340 }
341 }
342
343 return allowStore;
344 }
345
#if TRACING_ON
// Emit a MemoryAccess debug-trace line describing a packet: the given
// label, the requesting master's name, size, address, and a 'U'/'C'
// flag for uncacheable vs. cacheable.
static inline void
tracePacket(System *sys, const char *label, PacketPtr pkt)
{
    int size = pkt->getSize();
#if THE_ISA != NULL_ISA
    // For natural power-of-two sizes up to 8 bytes, print the data
    // value inline in guest byte order.
    if (size == 1 || size == 2 || size == 4 || size == 8) {
        DPRINTF(MemoryAccess,"%s from %s of size %i on address %#x data "
                "%#x %c\n", label, sys->getMasterName(pkt->req->masterId()),
                size, pkt->getAddr(), pkt->getUintX(TheISA::GuestByteOrder),
                pkt->req->isUncacheable() ? 'U' : 'C');
        return;
    }
#endif
    // Other sizes: print the metadata line, then hex-dump the payload.
    DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
            label, sys->getMasterName(pkt->req->masterId()),
            size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
    DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}

// Convenience wrapper used inside AbstractMemory member functions
// (relies on system() and a local `pkt` at the expansion site).
#   define TRACE_PACKET(A) tracePacket(system(), A, pkt)
#else
#   define TRACE_PACKET(A)
#endif
370
// Perform a timing/atomic-path access to this memory: update backing
// store and stats, track LL/SC state, and turn the packet into a
// response if one is needed.
void
AbstractMemory::access(PacketPtr pkt)
{
    // If a cache has already claimed this request, the memory must not
    // also respond.
    if (pkt->cacheResponding()) {
        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
                pkt->getAddr());
        return;
    }

    // Clean evictions/writebacks carry no dirty data and need no reply.
    if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
        DPRINTF(MemoryAccess, "CleanEvict  on 0x%x: not responding\n",
                pkt->getAddr());
        return;
    }

    assert(pkt->getAddrRange().isSubset(range));

    // Translate the packet's physical address to a host pointer into
    // the backing store (only dereferenced when pmemAddr is non-null).
    uint8_t *host_addr = toHostAddr(pkt->getAddr());

    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // Atomic RMW: return the old value in the packet, then
            // apply the atomic op to memory in place.
            if (pmemAddr) {
                pkt->setData(host_addr);
                (*(pkt->getAtomicOp()))(host_addr);
            }
        } else {
            // Plain or conditional swap: exchange the packet data with
            // memory, optionally guarded by a compare value.
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            uint64_t condition_val64;
            uint32_t condition_val32;

            panic_if(!pmemAddr, "Swap only works if there is real memory " \
                     "(i.e. null=False)");

            bool overwrite_mem = true;
            // keep a copy of our possible write value, and copy what is at the
            // memory address into the packet
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(host_addr);

            if (pkt->req->isCondSwap()) {
                // Conditional swap: only write if the current memory
                // contents equal the compare value carried in the
                // request's extra data (64- or 32-bit only).
                if (pkt->getSize() == sizeof(uint64_t)) {
                    condition_val64 = pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val64, host_addr,
                                                 sizeof(uint64_t));
                } else if (pkt->getSize() == sizeof(uint32_t)) {
                    condition_val32 = (uint32_t)pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val32, host_addr,
                                                 sizeof(uint32_t));
                } else
                    panic("Invalid size for conditional read/write\n");
            }

            if (overwrite_mem)
                std::memcpy(host_addr, &overwrite_val[0], pkt->getSize());

            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Read/Write");
            stats.numOther[pkt->req->masterId()]++;
        }
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            assert(!pkt->fromCache());
            // if the packet is not coming from a cache then we have
            // to do the LL/SC tracking here
            trackLoadLocked(pkt);
        }
        if (pmemAddr) {
            pkt->setData(host_addr);
        }
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        stats.numReads[pkt->req->masterId()]++;
        stats.bytesRead[pkt->req->masterId()] += pkt->getSize();
        if (pkt->req->isInstFetch())
            stats.bytesInstRead[pkt->req->masterId()] += pkt->getSize();
    } else if (pkt->isInvalidate() || pkt->isClean()) {
        assert(!pkt->isWrite());
        // in a fastmem system invalidating and/or cleaning packets
        // can be seen due to cache maintenance requests

        // no need to do anything
    } else if (pkt->isWrite()) {
        // writeOK() gates the write; LL/SC bookkeeping happens inside
        // (via checkLockedAddrList) for stores and store-conditionals.
        if (writeOK(pkt)) {
            if (pmemAddr) {
                pkt->writeData(host_addr);
                DPRINTF(MemoryAccess, "%s write due to %s\n",
                        __func__, pkt->print());
            }
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            stats.numWrites[pkt->req->masterId()]++;
            stats.bytesWritten[pkt->req->masterId()] += pkt->getSize();
        }
    } else {
        panic("Unexpected packet %s", pkt->print());
    }

    if (pkt->needsResponse()) {
        pkt->makeResponse();
    }
}
472
473 void
474 AbstractMemory::functionalAccess(PacketPtr pkt)
475 {
476 assert(pkt->getAddrRange().isSubset(range));
477
478 uint8_t *host_addr = toHostAddr(pkt->getAddr());
479
480 if (pkt->isRead()) {
481 if (pmemAddr) {
482 pkt->setData(host_addr);
483 }
484 TRACE_PACKET("Read");
485 pkt->makeResponse();
486 } else if (pkt->isWrite()) {
487 if (pmemAddr) {
488 pkt->writeData(host_addr);
489 }
490 TRACE_PACKET("Write");
491 pkt->makeResponse();
492 } else if (pkt->isPrint()) {
493 Packet::PrintReqState *prs =
494 dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
495 assert(prs);
496 // Need to call printLabels() explicitly since we're not going
497 // through printObj().
498 prs->printLabels();
499 // Right now we just print the single byte at the specified address.
500 ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *host_addr);
501 } else {
502 panic("AbstractMemory: unimplemented functional command %s",
503 pkt->cmdString());
504 }
505 }