mem: Remove a check that the memory size is a multiple of the page size.
[gem5.git] / src / mem / abstract_mem.cc
/*
 * Copyright (c) 2010-2012,2017-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/abstract_mem.hh"

#include <vector>

#include "arch/locked_mem.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "mem/packet_access.hh"
#include "sim/system.hh"

using namespace std;

AbstractMemory::AbstractMemory(const Params *p) :
    ClockedObject(p), range(params()->range), pmemAddr(NULL),
    backdoor(params()->range, nullptr,
             (MemBackdoor::Flags)(MemBackdoor::Readable |
                                  MemBackdoor::Writeable)),
    confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
    kvmMap(p->kvm_map), _system(NULL),
    stats(*this)
{
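    // The range only has to be valid and non-empty; the old requirement
    // that the memory size be a multiple of the page size was removed
    // (see the commit title above).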
    panic_if(!range.valid() || !range.size(),
             "Memory range %s must be valid with non-zero size.",
             range.to_string());
}

void
AbstractMemory::setBackingStore(uint8_t* pmem_addr)
{
    // If there was an existing backdoor, let everybody know it's going away.
    if (backdoor.ptr())
        backdoor.invalidate();

    // The back door can't handle interleaved memory.
    backdoor.ptr(range.interleaved() ? nullptr : pmem_addr);

    pmemAddr = pmem_addr;
}

AbstractMemory::MemStats::MemStats(AbstractMemory &_mem)
    : Stats::Group(&_mem), mem(_mem),
    bytesRead(this, "bytes_read",
              "Number of bytes read from this memory"),
    bytesInstRead(this, "bytes_inst_read",
                  "Number of instruction bytes read from this memory"),
    bytesWritten(this, "bytes_written",
                 "Number of bytes written to this memory"),
    numReads(this, "num_reads",
             "Number of read requests responded to by this memory"),
    numWrites(this, "num_writes",
              "Number of write requests responded to by this memory"),
    numOther(this, "num_other",
             "Number of other requests responded to by this memory"),
    bwRead(this, "bw_read",
           "Total read bandwidth from this memory (bytes/s)"),
    bwInstRead(this, "bw_inst_read",
               "Instruction read bandwidth from this memory (bytes/s)"),
    bwWrite(this, "bw_write",
            "Write bandwidth from this memory (bytes/s)"),
    bwTotal(this, "bw_total",
            "Total bandwidth to/from this memory (bytes/s)")
{
}

void
AbstractMemory::MemStats::regStats()
{
    using namespace Stats;

    Stats::Group::regStats();

    System *sys = mem.system();
    assert(sys);
    const auto max_masters = sys->maxMasters();
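    // Every stat below is a vector with one entry per master ID in the
    // system; subname() labels each entry with that master's name, and
    // the "total" flag adds an aggregate across all masters.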

    bytesRead
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bytesRead.subname(i, sys->getMasterName(i));
    }

    bytesInstRead
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bytesInstRead.subname(i, sys->getMasterName(i));
    }

    bytesWritten
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bytesWritten.subname(i, sys->getMasterName(i));
    }

    numReads
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        numReads.subname(i, sys->getMasterName(i));
    }

    numWrites
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        numWrites.subname(i, sys->getMasterName(i));
    }

    numOther
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        numOther.subname(i, sys->getMasterName(i));
    }

    bwRead
        .precision(0)
        .prereq(bytesRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bwRead.subname(i, sys->getMasterName(i));
    }

    bwInstRead
        .precision(0)
        .prereq(bytesInstRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bwInstRead.subname(i, sys->getMasterName(i));
    }

    bwWrite
        .precision(0)
        .prereq(bytesWritten)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bwWrite.subname(i, sys->getMasterName(i));
    }

    bwTotal
        .precision(0)
        .prereq(bwTotal)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        bwTotal.subname(i, sys->getMasterName(i));
    }

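    // The bandwidth stats are formulas: they are not incremented directly,
    // but are recomputed from the byte counters when stats are dumped.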
    bwRead = bytesRead / simSeconds;
    bwInstRead = bytesInstRead / simSeconds;
    bwWrite = bytesWritten / simSeconds;
    bwTotal = (bytesRead + bytesWritten) / simSeconds;
}

AddrRange
AbstractMemory::getAddrRange() const
{
    return range;
}

// Add load-locked to tracking list. Should only be called if the
// operation is a load and the LLSC flag is set.
void
AbstractMemory::trackLoadLocked(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
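    // mask() drops the low-order address bits, so reservations are matched
    // at a coarser granularity than a single byte (see LockedAddr::mask).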

    // First we check if we already have a locked addr for this
    // context. Since each context only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // No record for this context: allocate a new one.
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations. Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value. Non-conditional stores always
    // succeed. Assume conditional stores will fail until proven
    // otherwise.
    bool allowStore = !isLLSC;

    // Iterate over the list. Note that there could be multiple matching
    // records, as more than one context could have done a load locked to
    // this location. Only remove records once we have found a record for
    // (context, addr); then remove all records with this address. Failed
    // store-conditionals do not clear unrelated reservations.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    if (isLLSC) {
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr && i->matchesContext(req)) {
                // It's a store conditional, and as far as the memory system
                // can tell, the requesting context's lock is still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                allowStore = true;
                break;
            }
            // No match yet, so keep searching: another context may hold a
            // reservation on this address, but ours may still appear later
            // in the list.
            i++;
        }
        req->setExtraData(allowStore ? 1 : 0);
    }
    // LLSCs that succeeded AND non-LLSC stores both fall into here:
    if (allowStore) {
        // We write address paddr. However, there may be several entries with
        // a reservation on this address (for other contextIds) and they must
        // all be removed.
        i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr) {
                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                        i->contextId, paddr);
                ContextID owner_cid = i->contextId;
                assert(owner_cid != InvalidContextID);
                ContextID requester_cid = req->hasContextId() ?
                                          req->contextId() :
                                          InvalidContextID;
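                // This write clobbers another context's reservation, so
                // notify that context's ISA-level exclusive monitor as well.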
                if (owner_cid != requester_cid) {
                    ThreadContext* ctx = system()->getThreadContext(owner_cid);
                    TheISA::globalClearExclusive(ctx);
                }
                i = lockedAddrList.erase(i);
            } else {
                i++;
            }
        }
    }

    return allowStore;
}

#if TRACING_ON
static inline void
tracePacket(System *sys, const char *label, PacketPtr pkt)
{
    int size = pkt->getSize();
#if THE_ISA != NULL_ISA
    if (size == 1 || size == 2 || size == 4 || size == 8) {
        DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x data "
                "%#x %c\n", label, sys->getMasterName(pkt->req->masterId()),
                size, pkt->getAddr(), pkt->getUintX(TheISA::GuestByteOrder),
                pkt->req->isUncacheable() ? 'U' : 'C');
        return;
    }
#endif
    DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
            label, sys->getMasterName(pkt->req->masterId()),
            size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
    DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}

#   define TRACE_PACKET(A) tracePacket(system(), A, pkt)
#else
#   define TRACE_PACKET(A)
#endif

void
AbstractMemory::access(PacketPtr pkt)
{
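    // Handles atomic- and timing-mode requests. Packets that a cache has
    // already claimed responsibility for, as well as clean evictions and
    // writebacks, are dropped here without a response.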
    if (pkt->cacheResponding()) {
        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
                pkt->getAddr());
        return;
    }

    if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
        DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
                pkt->getAddr());
        return;
    }

    assert(pkt->getAddrRange().isSubset(range));

    uint8_t *host_addr = toHostAddr(pkt->getAddr());

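    // A SwapReq is an atomic read-modify-write: either apply a
    // packet-supplied atomic-op functor in place, or perform a (possibly
    // conditional) swap that returns the old memory contents in the packet.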
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            if (pmemAddr) {
                pkt->setData(host_addr);
                (*(pkt->getAtomicOp()))(host_addr);
            }
        } else {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            uint64_t condition_val64;
            uint32_t condition_val32;

            panic_if(!pmemAddr, "Swap only works if there is real memory "
                     "(i.e. null=False)");

            bool overwrite_mem = true;
            // keep a copy of our possible write value, and copy what is at
            // the memory address into the packet
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(host_addr);

            if (pkt->req->isCondSwap()) {
                if (pkt->getSize() == sizeof(uint64_t)) {
                    condition_val64 = pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val64, host_addr,
                                                 sizeof(uint64_t));
                } else if (pkt->getSize() == sizeof(uint32_t)) {
                    condition_val32 = (uint32_t)pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val32, host_addr,
                                                 sizeof(uint32_t));
                } else
                    panic("Invalid size for conditional read/write\n");
            }

            if (overwrite_mem)
                std::memcpy(host_addr, &overwrite_val[0], pkt->getSize());

            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Read/Write");
            stats.numOther[pkt->req->masterId()]++;
        }
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            assert(!pkt->fromCache());
            // If the packet is not coming from a cache, then we have
            // to do the LL/SC tracking here.
            trackLoadLocked(pkt);
        }
        if (pmemAddr) {
            pkt->setData(host_addr);
        }
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        stats.numReads[pkt->req->masterId()]++;
        stats.bytesRead[pkt->req->masterId()] += pkt->getSize();
        if (pkt->req->isInstFetch())
            stats.bytesInstRead[pkt->req->masterId()] += pkt->getSize();
    } else if (pkt->isInvalidate() || pkt->isClean()) {
        assert(!pkt->isWrite());
        // In a fastmem system, invalidating and/or cleaning packets can be
        // seen due to cache maintenance requests; no need to do anything.
    } else if (pkt->isWrite()) {
        if (writeOK(pkt)) {
            if (pmemAddr) {
                pkt->writeData(host_addr);
                DPRINTF(MemoryAccess, "%s write due to %s\n",
                        __func__, pkt->print());
            }
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            stats.numWrites[pkt->req->masterId()]++;
            stats.bytesWritten[pkt->req->masterId()] += pkt->getSize();
        }
    } else {
        panic("Unexpected packet %s", pkt->print());
    }

    if (pkt->needsResponse()) {
        pkt->makeResponse();
    }
}

void
AbstractMemory::functionalAccess(PacketPtr pkt)
{
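    // Functional accesses bypass timing entirely: they read or write the
    // backing store immediately, e.g. for debugger or binary-loading
    // accesses.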
    assert(pkt->getAddrRange().isSubset(range));

    uint8_t *host_addr = toHostAddr(pkt->getAddr());

    if (pkt->isRead()) {
        if (pmemAddr) {
            pkt->setData(host_addr);
        }
        TRACE_PACKET("Read");
        pkt->makeResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr) {
            pkt->writeData(host_addr);
        }
        TRACE_PACKET("Write");
        pkt->makeResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        assert(prs);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *host_addr);
    } else {
        panic("AbstractMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}