mem: Remove units from stats description
[gem5.git] / src / mem / abstract_mem.cc
1 /*
2 * Copyright (c) 2010-2012,2017-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41 #include "mem/abstract_mem.hh"
42
43 #include <vector>
44
45 #include "arch/locked_mem.hh"
46 #include "base/loader/memory_image.hh"
47 #include "base/loader/object_file.hh"
48 #include "cpu/thread_context.hh"
49 #include "debug/LLSC.hh"
50 #include "debug/MemoryAccess.hh"
51 #include "mem/packet_access.hh"
52 #include "sim/system.hh"
53
// Construct an abstract memory covering the configured address range.
// The backdoor is created readable+writeable up front but with a null
// host pointer; a real pointer is only published later via
// setBackingStore(). pmemAddr stays null until then, which the access
// paths treat as "null memory" (no backing store).
AbstractMemory::AbstractMemory(const Params &p) :
    ClockedObject(p), range(p.range), pmemAddr(NULL),
    backdoor(params().range, nullptr,
             (MemBackdoor::Flags)(MemBackdoor::Readable |
                                  MemBackdoor::Writeable)),
    confTableReported(p.conf_table_reported), inAddrMap(p.in_addr_map),
    kvmMap(p.kvm_map), _system(NULL),
    stats(*this)
{
    // A degenerate (invalid or zero-sized) range would make every
    // subsequent address check meaningless, so fail loudly here.
    panic_if(!range.valid() || !range.size(),
             "Memory range %s must be valid with non-zero size.",
             range.to_string());
}
67
68 void
69 AbstractMemory::initState()
70 {
71 ClockedObject::initState();
72
73 const auto &file = params().image_file;
74 if (file == "")
75 return;
76
77 auto *object = Loader::createObjectFile(file, true);
78 fatal_if(!object, "%s: Could not load %s.", name(), file);
79
80 Loader::debugSymbolTable.insert(*object->symtab().globals());
81 Loader::MemoryImage image = object->buildImage();
82
83 AddrRange image_range(image.minAddr(), image.maxAddr());
84 if (!range.contains(image_range.start())) {
85 warn("%s: Moving image from %s to memory address range %s.",
86 name(), image_range.to_string(), range.to_string());
87 image = image.offset(range.start());
88 image_range = AddrRange(image.minAddr(), image.maxAddr());
89 }
90 panic_if(!image_range.isSubset(range), "%s: memory image %s doesn't fit.",
91 name(), file);
92
93 PortProxy proxy([this](PacketPtr pkt) { functionalAccess(pkt); },
94 system()->cacheLineSize());
95
96 panic_if(!image.write(proxy), "%s: Unable to write image.");
97 }
98
99 void
100 AbstractMemory::setBackingStore(uint8_t* pmem_addr)
101 {
102 // If there was an existing backdoor, let everybody know it's going away.
103 if (backdoor.ptr())
104 backdoor.invalidate();
105
106 // The back door can't handle interleaved memory.
107 backdoor.ptr(range.interleaved() ? nullptr : pmem_addr);
108
109 pmemAddr = pmem_addr;
110 }
111
// Per-memory statistics group. The byte/request counters are vectors
// indexed by requestor ID (see regStats() for per-requestor subnames);
// the bw* entries are formulas derived from those counters divided by
// simulated seconds, bound in regStats().
AbstractMemory::MemStats::MemStats(AbstractMemory &_mem)
    : Stats::Group(&_mem), mem(_mem),
    ADD_STAT(bytesRead, UNIT_BYTE, "Number of bytes read from this memory"),
    ADD_STAT(bytesInstRead, UNIT_BYTE,
             "Number of instructions bytes read from this memory"),
    ADD_STAT(bytesWritten, UNIT_BYTE,
             "Number of bytes written to this memory"),
    ADD_STAT(numReads, UNIT_COUNT,
             "Number of read requests responded to by this memory"),
    ADD_STAT(numWrites, UNIT_COUNT,
             "Number of write requests responded to by this memory"),
    ADD_STAT(numOther, UNIT_COUNT,
             "Number of other requests responded to by this memory"),
    ADD_STAT(bwRead, UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Total read bandwidth from this memory"),
    ADD_STAT(bwInstRead, UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Instruction read bandwidth from this memory"),
    ADD_STAT(bwWrite, UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Write bandwidth from this memory"),
    ADD_STAT(bwTotal, UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Total bandwidth to/from this memory")
{
}
135
136 void
137 AbstractMemory::MemStats::regStats()
138 {
139 using namespace Stats;
140
141 Stats::Group::regStats();
142
143 System *sys = mem.system();
144 assert(sys);
145 const auto max_requestors = sys->maxRequestors();
146
147 bytesRead
148 .init(max_requestors)
149 .flags(total | nozero | nonan)
150 ;
151 for (int i = 0; i < max_requestors; i++) {
152 bytesRead.subname(i, sys->getRequestorName(i));
153 }
154
155 bytesInstRead
156 .init(max_requestors)
157 .flags(total | nozero | nonan)
158 ;
159 for (int i = 0; i < max_requestors; i++) {
160 bytesInstRead.subname(i, sys->getRequestorName(i));
161 }
162
163 bytesWritten
164 .init(max_requestors)
165 .flags(total | nozero | nonan)
166 ;
167 for (int i = 0; i < max_requestors; i++) {
168 bytesWritten.subname(i, sys->getRequestorName(i));
169 }
170
171 numReads
172 .init(max_requestors)
173 .flags(total | nozero | nonan)
174 ;
175 for (int i = 0; i < max_requestors; i++) {
176 numReads.subname(i, sys->getRequestorName(i));
177 }
178
179 numWrites
180 .init(max_requestors)
181 .flags(total | nozero | nonan)
182 ;
183 for (int i = 0; i < max_requestors; i++) {
184 numWrites.subname(i, sys->getRequestorName(i));
185 }
186
187 numOther
188 .init(max_requestors)
189 .flags(total | nozero | nonan)
190 ;
191 for (int i = 0; i < max_requestors; i++) {
192 numOther.subname(i, sys->getRequestorName(i));
193 }
194
195 bwRead
196 .precision(0)
197 .prereq(bytesRead)
198 .flags(total | nozero | nonan)
199 ;
200 for (int i = 0; i < max_requestors; i++) {
201 bwRead.subname(i, sys->getRequestorName(i));
202 }
203
204 bwInstRead
205 .precision(0)
206 .prereq(bytesInstRead)
207 .flags(total | nozero | nonan)
208 ;
209 for (int i = 0; i < max_requestors; i++) {
210 bwInstRead.subname(i, sys->getRequestorName(i));
211 }
212
213 bwWrite
214 .precision(0)
215 .prereq(bytesWritten)
216 .flags(total | nozero | nonan)
217 ;
218 for (int i = 0; i < max_requestors; i++) {
219 bwWrite.subname(i, sys->getRequestorName(i));
220 }
221
222 bwTotal
223 .precision(0)
224 .prereq(bwTotal)
225 .flags(total | nozero | nonan)
226 ;
227 for (int i = 0; i < max_requestors; i++) {
228 bwTotal.subname(i, sys->getRequestorName(i));
229 }
230
231 bwRead = bytesRead / simSeconds;
232 bwInstRead = bytesInstRead / simSeconds;
233 bwWrite = bytesWritten / simSeconds;
234 bwTotal = (bytesRead + bytesWritten) / simSeconds;
235 }
236
// Return the (possibly interleaved) address range this memory serves.
AddrRange
AbstractMemory::getAddrRange() const
{
    return range;
}
242
243 // Add load-locked to tracking list. Should only be called if the
244 // operation is a load and the LLSC flag is set.
245 void
246 AbstractMemory::trackLoadLocked(PacketPtr pkt)
247 {
248 const RequestPtr &req = pkt->req;
249 Addr paddr = LockedAddr::mask(req->getPaddr());
250
251 // first we check if we already have a locked addr for this
252 // xc. Since each xc only gets one, we just update the
253 // existing record with the new address.
254 std::list<LockedAddr>::iterator i;
255
256 for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
257 if (i->matchesContext(req)) {
258 DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
259 req->contextId(), paddr);
260 i->addr = paddr;
261 return;
262 }
263 }
264
265 // no record for this xc: need to allocate a new one
266 DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
267 req->contextId(), paddr);
268 lockedAddrList.push_front(LockedAddr(req));
269 backdoor.invalidate();
270 }
271
272
// Called on *writes* only... both regular stores and
// store-conditional operations. Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
//
// Returns true if the store may proceed (always for plain stores;
// for store-conditionals only when the context still holds a matching
// reservation). The SC outcome is also reported to the requester via
// req->setExtraData(1/0).
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool allowStore = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching records,
    // as more than one context could have done a load locked to this location.
    // Only remove records when we succeed in finding a record for (xc, addr);
    // then, remove all records with this address.  Failed store-conditionals do
    // not blow unrelated reservations.
    std::list<LockedAddr>::iterator i = lockedAddrList.begin();

    if (isLLSC) {
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory system can
                // tell, the requesting context's lock is still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                allowStore = true;
                break;
            }
            // If we didn't find a match, keep searching!  Someone else may well
            // have a reservation on this line here but we may find ours in just
            // a little while.
            i++;
        }
        // 1/0 in extra data tells the CPU whether the SC succeeded.
        req->setExtraData(allowStore ? 1 : 0);
    }
    // LLSCs that succeeded AND non-LLSC stores both fall into here:
    if (allowStore) {
        // We write address paddr.  However, there may be several entries with a
        // reservation on this address (for other contextIds) and they must all
        // be removed.
        i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr) {
                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                        i->contextId, paddr);
                ContextID owner_cid = i->contextId;
                assert(owner_cid != InvalidContextID);
                ContextID requestor_cid = req->hasContextId() ?
                                           req->contextId() :
                                           InvalidContextID;
                // A different context losing its reservation must have
                // its exclusive monitor cleared as well.
                if (owner_cid != requestor_cid) {
                    ThreadContext* ctx = system()->threads[owner_cid];
                    TheISA::globalClearExclusive(ctx);
                }
                // erase() returns the next valid iterator, keeping the
                // sweep safe while we remove entries mid-iteration.
                i = lockedAddrList.erase(i);
            } else {
                i++;
            }
        }
    }

    return allowStore;
}
341
#if TRACING_ON
// Emit a MemoryAccess debug trace line for a packet. Accesses of
// 1/2/4/8 bytes are printed with their data value inline; any other
// size falls back to a separate hex dump of the payload.
//
// Bug fix: the inline-data DPRINTF passed "size, pkt->getAddr()"
// twice, giving eight arguments for six format specifiers; the data
// value and the U/C cacheability flag were therefore printed from the
// wrong arguments. The duplicate pair is removed.
static inline void
tracePacket(System *sys, const char *label, PacketPtr pkt)
{
    int size = pkt->getSize();
    if (size == 1 || size == 2 || size == 4 || size == 8) {
        ByteOrder byte_order = sys->getGuestByteOrder();
        DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x data "
                "%#x %c\n", label, sys->getRequestorName(pkt->req->
                requestorId()), size, pkt->getAddr(),
                pkt->getUintX(byte_order),
                pkt->req->isUncacheable() ? 'U' : 'C');
        return;
    }
    DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
            label, sys->getRequestorName(pkt->req->requestorId()),
            size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
    DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}

#   define TRACE_PACKET(A) tracePacket(system(), A, pkt)
#else
#   define TRACE_PACKET(A)
#endif
366
// Perform a timing/atomic-mode access: dispatch on packet type, update
// the backing store and per-requestor statistics, and turn the packet
// into a response when one is required. When pmemAddr is null ("null
// memory"), data movement is skipped but bookkeeping still happens.
void
AbstractMemory::access(PacketPtr pkt)
{
    // If a cache has already claimed responsibility for this request,
    // memory must stay silent.
    if (pkt->cacheResponding()) {
        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
                pkt->getAddr());
        return;
    }

    // Clean evictions/writebacks carry no dirty data for memory to
    // absorb and never need a response.
    if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
        DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
                pkt->getAddr());
        return;
    }

    assert(pkt->getAddrRange().isSubset(range));

    uint8_t *host_addr = toHostAddr(pkt->getAddr());

    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // Atomic-op swap: read current value into the packet, then
            // apply the packet's AMO functor to the host copy.
            if (pmemAddr) {
                pkt->setData(host_addr);
                (*(pkt->getAtomicOp()))(host_addr);
            }
        } else {
            // Plain or conditional swap: exchange packet data with the
            // memory contents, optionally gated on a compare value.
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            uint64_t condition_val64;
            uint32_t condition_val32;

            panic_if(!pmemAddr, "Swap only works if there is real memory " \
                     "(i.e. null=False)");

            bool overwrite_mem = true;
            // keep a copy of our possible write value, and copy what is at the
            // memory address into the packet
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(host_addr);

            if (pkt->req->isCondSwap()) {
                // Compare-and-swap: only write if memory matches the
                // condition value carried in the request's extra data.
                if (pkt->getSize() == sizeof(uint64_t)) {
                    condition_val64 = pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val64, host_addr,
                                                 sizeof(uint64_t));
                } else if (pkt->getSize() == sizeof(uint32_t)) {
                    condition_val32 = (uint32_t)pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val32, host_addr,
                                                 sizeof(uint32_t));
                } else
                    panic("Invalid size for conditional read/write\n");
            }

            if (overwrite_mem)
                std::memcpy(host_addr, &overwrite_val[0], pkt->getSize());

            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Read/Write");
            stats.numOther[pkt->req->requestorId()]++;
        }
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            assert(!pkt->fromCache());
            // if the packet is not coming from a cache then we have
            // to do the LL/SC tracking here
            trackLoadLocked(pkt);
        }
        if (pmemAddr) {
            pkt->setData(host_addr);
        }
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        stats.numReads[pkt->req->requestorId()]++;
        stats.bytesRead[pkt->req->requestorId()] += pkt->getSize();
        if (pkt->req->isInstFetch())
            stats.bytesInstRead[pkt->req->requestorId()] += pkt->getSize();
    } else if (pkt->isInvalidate() || pkt->isClean()) {
        assert(!pkt->isWrite());
        // in a fastmem system invalidating and/or cleaning packets
        // can be seen due to cache maintenance requests

        // no need to do anything
    } else if (pkt->isWrite()) {
        // writeOK() gates the store on the LL/SC reservation check;
        // failed store-conditionals update no state and no stats.
        if (writeOK(pkt)) {
            if (pmemAddr) {
                pkt->writeData(host_addr);
                DPRINTF(MemoryAccess, "%s write due to %s\n",
                        __func__, pkt->print());
            }
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            stats.numWrites[pkt->req->requestorId()]++;
            stats.bytesWritten[pkt->req->requestorId()] += pkt->getSize();
        }
    } else {
        panic("Unexpected packet %s", pkt->print());
    }

    if (pkt->needsResponse()) {
        pkt->makeResponse();
    }
}
468
469 void
470 AbstractMemory::functionalAccess(PacketPtr pkt)
471 {
472 assert(pkt->getAddrRange().isSubset(range));
473
474 uint8_t *host_addr = toHostAddr(pkt->getAddr());
475
476 if (pkt->isRead()) {
477 if (pmemAddr) {
478 pkt->setData(host_addr);
479 }
480 TRACE_PACKET("Read");
481 pkt->makeResponse();
482 } else if (pkt->isWrite()) {
483 if (pmemAddr) {
484 pkt->writeData(host_addr);
485 }
486 TRACE_PACKET("Write");
487 pkt->makeResponse();
488 } else if (pkt->isPrint()) {
489 Packet::PrintReqState *prs =
490 dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
491 assert(prs);
492 // Need to call printLabels() explicitly since we're not going
493 // through printObj().
494 prs->printLabels();
495 // Right now we just print the single byte at the specified address.
496 ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *host_addr);
497 } else {
498 panic("AbstractMemory: unimplemented functional command %s",
499 pkt->cmdString());
500 }
501 }