/*
 * Copyright (c) 2017 Jason Lowe-Power
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "learning_gem5/part2/simple_cache.hh"

#include "base/random.hh"
#include "debug/SimpleCache.hh"
#include "sim/system.hh"

SimpleCache::SimpleCache(SimpleCacheParams *params) :
    ClockedObject(params),
    latency(params->latency),
    blockSize(params->system->cacheLineSize()),
    capacity(params->size / blockSize),
    memPort(params->name + ".mem_side", this),
    blocked(false), originalPacket(nullptr), waitingPortId(-1), stats(this)
{
    // Since the CPU side ports are a vector of ports, create an instance of
    // the CPUSidePort for each connection. This member of params is
    // automatically created depending on the name of the vector port and
    // holds the number of connections to this port name
    for (int i = 0; i < params->port_cpu_side_connection_count; ++i) {
        cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i), i, this);
    }
}

Port &
SimpleCache::getPort(const std::string &if_name, PortID idx)
{
    // This is the name from the Python SimObject declaration in SimpleCache.py
    if (if_name == "mem_side") {
        panic_if(idx != InvalidPortID,
                 "Mem side of simple cache not a vector port");
        return memPort;
    } else if (if_name == "cpu_side" && idx < cpuPorts.size()) {
        // We should have already created all of the ports in the constructor
        return cpuPorts[idx];
    } else {
        // pass it along to our super class
        return ClockedObject::getPort(if_name, idx);
    }
}

void
SimpleCache::CPUSidePort::sendPacket(PacketPtr pkt)
{
    // Note: This flow control is very simple since the cache is blocking.

    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");

    // If we can't send the packet across the port, store it for later.
    DPRINTF(SimpleCache, "Sending %s to CPU\n", pkt->print());
    if (!sendTimingResp(pkt)) {
        DPRINTF(SimpleCache, "failed!\n");
        blockedPacket = pkt;
    }
}

AddrRangeList
SimpleCache::CPUSidePort::getAddrRanges() const
{
    return owner->getAddrRanges();
}

void
SimpleCache::CPUSidePort::trySendRetry()
{
    if (needRetry && blockedPacket == nullptr) {
        // Only send a retry if the port is now completely free
        needRetry = false;
        DPRINTF(SimpleCache, "Sending retry req.\n");
        sendRetryReq();
    }
}

void
SimpleCache::CPUSidePort::recvFunctional(PacketPtr pkt)
{
    // Just forward to the cache.
    return owner->handleFunctional(pkt);
}

bool
SimpleCache::CPUSidePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(SimpleCache, "Got request %s\n", pkt->print());

    if (blockedPacket || needRetry) {
        // The cache may not be able to send a reply if this port is blocked
        // or if it is already waiting to send a retry.
        DPRINTF(SimpleCache, "Request blocked\n");
        needRetry = true;
        return false;
    }
    // Just forward to the cache.
    if (!owner->handleRequest(pkt, id)) {
        DPRINTF(SimpleCache, "Request failed\n");
        // stalling
        needRetry = true;
        return false;
    } else {
        DPRINTF(SimpleCache, "Request succeeded\n");
        return true;
    }
}

void
SimpleCache::CPUSidePort::recvRespRetry()
{
    // We should have a blocked packet if this function is called.
    assert(blockedPacket != nullptr);

    // Grab the blocked packet.
    PacketPtr pkt = blockedPacket;
    blockedPacket = nullptr;

    DPRINTF(SimpleCache, "Retrying response pkt %s\n", pkt->print());
    // Try to resend it. It's possible that it fails again.
    sendPacket(pkt);

    // We may now be able to accept new packets
    trySendRetry();
}

void
SimpleCache::MemSidePort::sendPacket(PacketPtr pkt)
{
    // Note: This flow control is very simple since the cache is blocking.

    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");

    // If we can't send the packet across the port, store it for later.
    if (!sendTimingReq(pkt)) {
        blockedPacket = pkt;
    }
}

bool
SimpleCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    // Just forward to the cache.
    return owner->handleResponse(pkt);
}

void
SimpleCache::MemSidePort::recvReqRetry()
{
    // We should have a blocked packet if this function is called.
    assert(blockedPacket != nullptr);

    // Grab the blocked packet.
    PacketPtr pkt = blockedPacket;
    blockedPacket = nullptr;

    // Try to resend it. It's possible that it fails again.
    sendPacket(pkt);
}

void
SimpleCache::MemSidePort::recvRangeChange()
{
    owner->sendRangeChange();
}

bool
SimpleCache::handleRequest(PacketPtr pkt, int port_id)
{
    if (blocked) {
        // There is currently an outstanding request so we can't respond. Stall
        return false;
    }

    DPRINTF(SimpleCache, "Got request for addr %#x\n", pkt->getAddr());

    // This cache is now blocked waiting for the response to this packet.
    blocked = true;

    // Store the port for when we get the response
    assert(waitingPortId == -1);
    waitingPortId = port_id;

    // Schedule an event after cache access latency to actually access
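    // clockEdge(latency) returns the absolute tick of the clock edge
    // `latency` cycles in the future, and passing `true` to the
    // EventFunctionWrapper marks the event for automatic deletion once it
    // has been processed, so we don't have to track it ourselves.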
    schedule(new EventFunctionWrapper([this, pkt]{ accessTiming(pkt); },
                                      name() + ".accessEvent", true),
             clockEdge(latency));

    return true;
}

bool
SimpleCache::handleResponse(PacketPtr pkt)
{
    assert(blocked);
    DPRINTF(SimpleCache, "Got response for addr %#x\n", pkt->getAddr());

    // For now assume that inserts are off of the critical path and don't count
    // for any added latency.
    insert(pkt);

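    // missTime was recorded in accessTiming() when the miss was detected,
    // so this samples the full miss latency (in ticks) into the histogram.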
    stats.missLatency.sample(curTick() - missTime);

    // If we had to upgrade the request packet to a full cache line, now we
    // can use that packet to construct the response.
    if (originalPacket != nullptr) {
        DPRINTF(SimpleCache, "Copying data from new packet to old\n");
        // We had to upgrade a previous packet. We can functionally deal with
        // the cache access now. It better be a hit.
        bool hit M5_VAR_USED = accessFunctional(originalPacket);
        panic_if(!hit, "Should always hit after inserting");
        originalPacket->makeResponse();
        delete pkt; // We may need to delay this, I'm not sure.
        pkt = originalPacket;
        originalPacket = nullptr;
    } // else, pkt contains the data it needs

    sendResponse(pkt);

    return true;
}

void SimpleCache::sendResponse(PacketPtr pkt)
{
    assert(blocked);
    DPRINTF(SimpleCache, "Sending resp for addr %#x\n", pkt->getAddr());

    int port = waitingPortId;

    // The packet is now done. We're about to put it in the port, no need for
    // this object to continue to stall.
    // We need to free the resource before sending the packet in case the CPU
    // tries to send another request immediately (e.g., in the same callchain).
    blocked = false;
    waitingPortId = -1;

    // Forward the response to the CPU-side port that sent the original request
    cpuPorts[port].sendPacket(pkt);

    // For each of the cpu ports, if it needs to send a retry, it should do it
    // now since this memory object may be unblocked now.
    for (auto& port : cpuPorts) {
        port.trySendRetry();
    }
}

void
SimpleCache::handleFunctional(PacketPtr pkt)
{
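    // Functional accesses must return data immediately and never allocate
    // in the cache: if the block is present, satisfy the access here;
    // otherwise pass it straight through to memory.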
    if (accessFunctional(pkt)) {
        pkt->makeResponse();
    } else {
        memPort.sendFunctional(pkt);
    }
}

void
SimpleCache::accessTiming(PacketPtr pkt)
{
    bool hit = accessFunctional(pkt);

    DPRINTF(SimpleCache, "%s for packet: %s\n", hit ? "Hit" : "Miss",
            pkt->print());

    if (hit) {
        // Respond to the CPU side
        stats.hits++; // update stats
        DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), pkt->getSize());
        pkt->makeResponse();
        sendResponse(pkt);
    } else {
        stats.misses++; // update stats
        missTime = curTick();
        // Forward to the memory side.
        // We can't directly forward the packet unless it is exactly the size
        // of the cache line, and aligned. Check for that here.
        Addr addr = pkt->getAddr();
        Addr block_addr = pkt->getBlockAddr(blockSize);
        unsigned size = pkt->getSize();
        if (addr == block_addr && size == blockSize) {
            // Aligned and block-sized. We can just forward.
            DPRINTF(SimpleCache, "forwarding packet\n");
            memPort.sendPacket(pkt);
        } else {
            DPRINTF(SimpleCache, "Upgrading packet to block size\n");
            panic_if(addr - block_addr + size > blockSize,
                     "Cannot handle accesses that span multiple cache lines");
            // Unaligned access to one cache block
            assert(pkt->needsResponse());
            MemCmd cmd;
            if (pkt->isWrite() || pkt->isRead()) {
                // For both reads and writes we first read the whole block
                // from memory; the original request is then satisfied out of
                // the cache (i.e., this is a writeback cache).
                cmd = MemCmd::ReadReq;
            } else {
                panic("Unknown packet type in upgrade size");
            }

            // Create a new packet that is blockSize
            PacketPtr new_pkt = new Packet(pkt->req, cmd, blockSize);
            new_pkt->allocate();
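            // Note: the block-sized Packet constructor used above rounds the
            // request's address down to the block boundary, which is what
            // lets the alignment assert below hold for unaligned requests.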

            // Should now be block aligned
            assert(new_pkt->getAddr() == new_pkt->getBlockAddr(blockSize));

            // Save the old packet
            originalPacket = pkt;

            DPRINTF(SimpleCache, "forwarding packet\n");
            memPort.sendPacket(new_pkt);
        }
    }
}

bool
SimpleCache::accessFunctional(PacketPtr pkt)
{
    Addr block_addr = pkt->getBlockAddr(blockSize);
    auto it = cacheStore.find(block_addr);
    if (it != cacheStore.end()) {
        if (pkt->isWrite()) {
            // Write the data into the block in the cache
            pkt->writeDataToBlock(it->second, blockSize);
        } else if (pkt->isRead()) {
            // Read the data out of the cache block into the packet
            pkt->setDataFromBlock(it->second, blockSize);
        } else {
            panic("Unknown packet type!");
        }
        return true;
    }
    return false;
}

void
SimpleCache::insert(PacketPtr pkt)
{
    // The packet should be aligned.
    assert(pkt->getAddr() == pkt->getBlockAddr(blockSize));
    // The address should not be in the cache
    assert(cacheStore.find(pkt->getAddr()) == cacheStore.end());
    // The pkt should be a response
    assert(pkt->isResponse());

    if (cacheStore.size() >= capacity) {
        // Select random thing to evict. This is a little convoluted since we
        // are using a std::unordered_map. See http://bit.ly/2hrnLP2
        int bucket, bucket_size;
        do {
            bucket = random_mt.random(0, (int)cacheStore.bucket_count() - 1);
        } while ( (bucket_size = cacheStore.bucket_size(bucket)) == 0 );
        auto block = std::next(cacheStore.begin(bucket),
                               random_mt.random(0, bucket_size - 1));
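        // (std::unordered_map has no random-access iterators, so we pick a
        // random non-empty bucket first and then a random element within
        // that bucket.)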

        DPRINTF(SimpleCache, "Removing addr %#x\n", block->first);

        // Write back the data.
        // Create a new request-packet pair
        RequestPtr req = std::make_shared<Request>(
            block->first, blockSize, 0, 0);

        PacketPtr new_pkt = new Packet(req, MemCmd::WritebackDirty, blockSize);
        new_pkt->dataDynamic(block->second); // This will be deleted later
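        // dataDynamic() hands ownership of the evicted block's storage to
        // the writeback packet, so the array is freed when that packet is
        // destroyed.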

        DPRINTF(SimpleCache, "Writing packet back %s\n", new_pkt->print());
        // Send the write to memory
        memPort.sendPacket(new_pkt);

        // Delete this entry
        cacheStore.erase(block->first);
    }

    DPRINTF(SimpleCache, "Inserting %s\n", pkt->print());
    DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), blockSize);

    // Allocate space for the cache block data
    uint8_t *data = new uint8_t[blockSize];

    // Insert the data and address into the cache store
    cacheStore[pkt->getAddr()] = data;

    // Write the data into the cache
    pkt->writeDataToBlock(data, blockSize);
}

AddrRangeList
SimpleCache::getAddrRanges() const
{
    DPRINTF(SimpleCache, "Sending new ranges\n");
    // Just use the same ranges as whatever is on the memory side.
    return memPort.getAddrRanges();
}

void
SimpleCache::sendRangeChange() const
{
    for (auto& port : cpuPorts) {
        port.sendRangeChange();
    }
}

SimpleCache::SimpleCacheStats::SimpleCacheStats(Stats::Group *parent)
    : Stats::Group(parent),
      ADD_STAT(hits, "Number of hits"),
      ADD_STAT(misses, "Number of misses"),
      ADD_STAT(missLatency, "Ticks for misses to the cache"),
      ADD_STAT(hitRatio, "The ratio of hits to the total "
                         "accesses to the cache", hits / (hits + misses))
{
    missLatency.init(16); // number of buckets
}


SimpleCache*
SimpleCacheParams::create()
{
    return new SimpleCache(this);
}