/*
 * Copyright (c) 2017 Jason Lowe-Power
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Jason Lowe-Power
 */

#include "learning_gem5/part2/simple_cache.hh"

#include "base/random.hh"
#include "debug/SimpleCache.hh"
#include "sim/system.hh"

SimpleCache::SimpleCache(SimpleCacheParams *params) :
    MemObject(params),
    latency(params->latency),
    blockSize(params->system->cacheLineSize()),
    capacity(params->size / blockSize),
    memPort(params->name + ".mem_side", this),
    blocked(false), originalPacket(nullptr), waitingPortId(-1)
{
    // Since the CPU side ports are a vector of ports, create an instance of
    // the CPUSidePort for each connection. This member of params is
    // automatically created depending on the name of the vector port and
    // holds the number of connections to this port name
    for (int i = 0; i < params->port_cpu_side_connection_count; ++i) {
        cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i), i, this);
    }
}

BaseMasterPort&
SimpleCache::getMasterPort(const std::string& if_name, PortID idx)
{
    panic_if(idx != InvalidPortID, "This object doesn't support vector ports");

    // This is the name from the Python SimObject declaration in SimpleCache.py
    if (if_name == "mem_side") {
        return memPort;
    } else {
        // pass it along to our super class
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort&
SimpleCache::getSlavePort(const std::string& if_name, PortID idx)
{
    // This is the name from the Python SimObject declaration (SimpleCache.py)
    if (if_name == "cpu_side" && idx < cpuPorts.size()) {
        // We should have already created all of the ports in the constructor
        return cpuPorts[idx];
    } else {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    }
}

void
SimpleCache::CPUSidePort::sendPacket(PacketPtr pkt)
{
    // Note: This flow control is very simple since the cache is blocking.

    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");

    // If we can't send the packet across the port, store it for later.
    DPRINTF(SimpleCache, "Sending %s to CPU\n", pkt->print());
    if (!sendTimingResp(pkt)) {
        DPRINTF(SimpleCache, "failed!\n");
        blockedPacket = pkt;
    }
}

AddrRangeList
SimpleCache::CPUSidePort::getAddrRanges() const
{
    return owner->getAddrRanges();
}

void
SimpleCache::CPUSidePort::trySendRetry()
{
    if (needRetry && blockedPacket == nullptr) {
        // Only send a retry if the port is now completely free
        needRetry = false;
        DPRINTF(SimpleCache, "Sending retry req.\n");
        sendRetryReq();
    }
}

void
SimpleCache::CPUSidePort::recvFunctional(PacketPtr pkt)
{
    // Just forward to the cache.
    return owner->handleFunctional(pkt);
}

bool
SimpleCache::CPUSidePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(SimpleCache, "Got request %s\n", pkt->print());

    if (blockedPacket || needRetry) {
        // The cache may not be able to send a reply if this is blocked
        DPRINTF(SimpleCache, "Request blocked\n");
        needRetry = true;
        return false;
    }
    // Just forward to the cache.
    if (!owner->handleRequest(pkt, id)) {
        DPRINTF(SimpleCache, "Request failed\n");
        // stalling
        needRetry = true;
        return false;
    } else {
        DPRINTF(SimpleCache, "Request succeeded\n");
        return true;
    }
}

void
SimpleCache::CPUSidePort::recvRespRetry()
{
    // We should have a blocked packet if this function is called.
    assert(blockedPacket != nullptr);

    // Grab the blocked packet.
    PacketPtr pkt = blockedPacket;
    blockedPacket = nullptr;

    DPRINTF(SimpleCache, "Retrying response pkt %s\n", pkt->print());
    // Try to resend it. It's possible that it fails again.
    sendPacket(pkt);

    // We may now be able to accept new packets
    trySendRetry();
}

void
SimpleCache::MemSidePort::sendPacket(PacketPtr pkt)
{
    // Note: This flow control is very simple since the cache is blocking.

    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");

    // If we can't send the packet across the port, store it for later.
    if (!sendTimingReq(pkt)) {
        blockedPacket = pkt;
    }
}

bool
SimpleCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    // Just forward to the cache.
    return owner->handleResponse(pkt);
}

void
SimpleCache::MemSidePort::recvReqRetry()
{
    // We should have a blocked packet if this function is called.
    assert(blockedPacket != nullptr);

    // Grab the blocked packet.
    PacketPtr pkt = blockedPacket;
    blockedPacket = nullptr;

    // Try to resend it. It's possible that it fails again.
    sendPacket(pkt);
}

void
SimpleCache::MemSidePort::recvRangeChange()
{
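    // The address ranges visible on the memory side have changed; have the
    // cache propagate the new ranges to all of its CPU-side ports.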
    owner->sendRangeChange();
}

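// handleRequest is called by a CPUSidePort when a new timing request arrives.
// It returns false if the cache is already busy with an outstanding request,
// which makes the port stall the CPU until the cache unblocks.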
bool
SimpleCache::handleRequest(PacketPtr pkt, int port_id)
{
    if (blocked) {
        // There is currently an outstanding request so we can't respond. Stall
        return false;
    }

    DPRINTF(SimpleCache, "Got request for addr %#x\n", pkt->getAddr());

    // This cache is now blocked waiting for the response to this packet.
    blocked = true;

    // Store the port for when we get the response
    assert(waitingPortId == -1);
    waitingPortId = port_id;

    // Schedule an event after cache access latency to actually access
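    // clockEdge(latency) returns the absolute tick of the clock edge
    // 'latency' cycles in the future, so the lookup is modeled as taking
    // 'latency' cycles from now.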
    schedule(new EventFunctionWrapper([this, pkt]{ accessTiming(pkt); },
                                      name() + ".accessEvent", true),
             clockEdge(latency));

    return true;
}

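// handleResponse is called by the MemSidePort when memory returns data for
// an outstanding miss. The returned block is inserted into the cache and a
// response is sent back to the port that made the original request.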
bool
SimpleCache::handleResponse(PacketPtr pkt)
{
    assert(blocked);
    DPRINTF(SimpleCache, "Got response for addr %#x\n", pkt->getAddr());

    // For now, assume that inserts are off the critical path and do not add
    // any latency.
    insert(pkt);

    missLatency.sample(curTick() - missTime);

    // If we had to upgrade the request packet to a full cache line, now we
    // can use that packet to construct the response.
    if (originalPacket != nullptr) {
        DPRINTF(SimpleCache, "Copying data from new packet to old\n");
        // We had to upgrade a previous packet. We can functionally deal with
        // the cache access now. It better be a hit.
        bool hit M5_VAR_USED = accessFunctional(originalPacket);
        panic_if(!hit, "Should always hit after inserting");
        originalPacket->makeResponse();
        delete pkt; // We may need to delay this, I'm not sure.
        pkt = originalPacket;
        originalPacket = nullptr;
    } // else, pkt contains the data it needs

    sendResponse(pkt);

    return true;
}

void
SimpleCache::sendResponse(PacketPtr pkt)
{
    assert(blocked);
    DPRINTF(SimpleCache, "Sending resp for addr %#x\n", pkt->getAddr());

    int port = waitingPortId;

    // The packet is now done. We're about to put it in the port, no need for
    // this object to continue to stall.
    // We need to free the resource before sending the packet in case the CPU
    // tries to send another request immediately (e.g., in the same callchain).
    blocked = false;
    waitingPortId = -1;

    // Forward the response to the CPU-side port that made the request
    cpuPorts[port].sendPacket(pkt);

    // For each of the cpu ports, if it needs to send a retry, it should do it
    // now since this memory object may be unblocked now.
    for (auto& port : cpuPorts) {
        port.trySendRetry();
    }
}

void
SimpleCache::handleFunctional(PacketPtr pkt)
{
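    // Functional accesses have no timing: satisfy the request from the cache
    // store if the block is present, otherwise pass it straight to memory.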
    if (accessFunctional(pkt)) {
        pkt->makeResponse();
    } else {
        memPort.sendFunctional(pkt);
    }
}

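// accessTiming performs the cache access once the modeled latency has
// elapsed. Hits are answered immediately; misses are forwarded to memory,
// upgrading the request to a full cache line when it is smaller than or not
// aligned to a block.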
void
SimpleCache::accessTiming(PacketPtr pkt)
{
    bool hit = accessFunctional(pkt);

    DPRINTF(SimpleCache, "%s for packet: %s\n", hit ? "Hit" : "Miss",
            pkt->print());

    if (hit) {
        // Respond to the CPU side
        hits++; // update stats
        DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), pkt->getSize());
        pkt->makeResponse();
        sendResponse(pkt);
    } else {
        misses++; // update stats
        missTime = curTick();
        // Forward to the memory side.
        // We can't directly forward the packet unless it is exactly the size
        // of the cache line, and aligned. Check for that here.
        Addr addr = pkt->getAddr();
        Addr block_addr = pkt->getBlockAddr(blockSize);
        unsigned size = pkt->getSize();
        if (addr == block_addr && size == blockSize) {
            // Aligned and block size. We can just forward.
            DPRINTF(SimpleCache, "forwarding packet\n");
            memPort.sendPacket(pkt);
        } else {
            DPRINTF(SimpleCache, "Upgrading packet to block size\n");
            panic_if(addr - block_addr + size > blockSize,
                     "Cannot handle accesses that span multiple cache lines");
            // Unaligned access to one cache block
            assert(pkt->needsResponse());
            MemCmd cmd;
            if (pkt->isWrite() || pkt->isRead()) {
                // Read the data from memory to write into the block.
                // We'll write the data in the cache (i.e., a writeback cache)
                cmd = MemCmd::ReadReq;
            } else {
                panic("Unknown packet type in upgrade size");
            }

            // Create a new packet that is blockSize
            PacketPtr new_pkt = new Packet(pkt->req, cmd, blockSize);
            new_pkt->allocate();

            // Should now be block aligned
            assert(new_pkt->getAddr() == new_pkt->getBlockAddr(blockSize));

            // Save the old packet
            originalPacket = pkt;

            DPRINTF(SimpleCache, "forwarding packet\n");
            memPort.sendPacket(new_pkt);
        }
    }
}

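// accessFunctional performs the actual lookup and data movement without any
// timing. It returns true on a hit (after servicing the read or write) and
// false on a miss.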
bool
SimpleCache::accessFunctional(PacketPtr pkt)
{
    Addr block_addr = pkt->getBlockAddr(blockSize);
    auto it = cacheStore.find(block_addr);
    if (it != cacheStore.end()) {
        if (pkt->isWrite()) {
            // Write the data into the block in the cache
            pkt->writeDataToBlock(it->second, blockSize);
        } else if (pkt->isRead()) {
            // Read the data out of the cache block into the packet
            pkt->setDataFromBlock(it->second, blockSize);
        } else {
            panic("Unknown packet type!");
        }
        return true;
    }
    return false;
}

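// insert adds the block-aligned response data to the cache store, evicting
// (and writing back) a randomly chosen block first if the cache is full.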
void
SimpleCache::insert(PacketPtr pkt)
{
    // The packet should be aligned.
    assert(pkt->getAddr() == pkt->getBlockAddr(blockSize));
    // The address should not be in the cache
    assert(cacheStore.find(pkt->getAddr()) == cacheStore.end());
    // The pkt should be a response
    assert(pkt->isResponse());

    if (cacheStore.size() >= capacity) {
        // Select random thing to evict. This is a little convoluted since we
        // are using a std::unordered_map. See http://bit.ly/2hrnLP2
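        // First pick a random non-empty bucket, then pick a random entry
        // within that bucket, so we never have to walk the whole map.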
        int bucket, bucket_size;
        do {
            bucket = random_mt.random(0, (int)cacheStore.bucket_count() - 1);
        } while ( (bucket_size = cacheStore.bucket_size(bucket)) == 0 );
        auto block = std::next(cacheStore.begin(bucket),
                               random_mt.random(0, bucket_size - 1));

        DPRINTF(SimpleCache, "Removing addr %#x\n", block->first);

        // Write back the data.
        // Create a new request-packet pair
        RequestPtr req = std::make_shared<Request>(
            block->first, blockSize, 0, 0);

        PacketPtr new_pkt = new Packet(req, MemCmd::WritebackDirty, blockSize);
        new_pkt->dataDynamic(block->second); // This will be deleted later

        DPRINTF(SimpleCache, "Writing packet back %s\n", new_pkt->print());
        // Send the write to memory
        memPort.sendPacket(new_pkt);

        // Delete this entry
        cacheStore.erase(block->first);
    }

    DPRINTF(SimpleCache, "Inserting %s\n", pkt->print());
    DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), blockSize);

    // Allocate space for the cache block data
    uint8_t *data = new uint8_t[blockSize];

    // Insert the data and address into the cache store
    cacheStore[pkt->getAddr()] = data;

    // Write the data into the cache
    pkt->writeDataToBlock(data, blockSize);
}

AddrRangeList
SimpleCache::getAddrRanges() const
{
    DPRINTF(SimpleCache, "Sending new ranges\n");
    // Just use the same ranges as whatever is on the memory side.
    return memPort.getAddrRanges();
}

void
SimpleCache::sendRangeChange() const
{
    for (auto& port : cpuPorts) {
        port.sendRangeChange();
    }
}

void
SimpleCache::regStats()
{
    // If you don't do this you get errors about uninitialized stats.
    MemObject::regStats();

    hits.name(name() + ".hits")
        .desc("Number of hits")
        ;

    misses.name(name() + ".misses")
        .desc("Number of misses")
        ;

    missLatency.name(name() + ".missLatency")
        .desc("Ticks for misses to the cache")
        .init(16) // number of buckets
        ;

    hitRatio.name(name() + ".hitRatio")
        .desc("The ratio of hits to the total accesses to the cache")
        ;

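    // hitRatio is a formula stat: the expression below is evaluated from the
    // current values of hits and misses whenever statistics are dumped.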
    hitRatio = hits / (hits + misses);
}

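// Called by the params object (built from the Python SimObject description)
// to construct the C++ SimpleCache instance.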
SimpleCache*
SimpleCacheParams::create()
{
    return new SimpleCache(this);
}