misc: Replaced master/slave terminology
[gem5.git] / src / learning_gem5 / part2 / simple_cache.cc
/*
 * Copyright (c) 2017 Jason Lowe-Power
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "learning_gem5/part2/simple_cache.hh"

#include "base/random.hh"
#include "debug/SimpleCache.hh"
#include "sim/system.hh"

SimpleCache::SimpleCache(SimpleCacheParams *params) :
    ClockedObject(params),
    latency(params->latency),
    blockSize(params->system->cacheLineSize()),
    capacity(params->size / blockSize),
    memPort(params->name + ".mem_side", this),
    blocked(false), originalPacket(nullptr), waitingPortId(-1), stats(this)
{
    // Since the CPU side ports are a vector of ports, create an instance of
    // the CPUSidePort for each connection. This member of params is
    // automatically created depending on the name of the vector port and
    // holds the number of connections to this port name
    for (int i = 0; i < params->port_cpu_side_connection_count; ++i) {
        cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i),
                              i, this);
    }
}
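
// Illustrative note (not in the original source): the
// port_cpu_side_connection_count parameter used above reflects how many
// peers the cpu_side vector port is bound to in the Python configuration.
// A minimal sketch of such a configuration, assuming tutorial-style script
// names, might be:
//
//     system.cache = SimpleCache(size='1kB')
//     system.cpu.icache_port = system.cache.cpu_side
//     system.cpu.dcache_port = system.cache.cpu_side
//
// which would result in two CPUSidePort instances, cpu_side[0] and
// cpu_side[1].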

Port &
SimpleCache::getPort(const std::string &if_name, PortID idx)
{
    // This is the name from the Python SimObject declaration in SimpleCache.py
    if (if_name == "mem_side") {
        panic_if(idx != InvalidPortID,
                 "Mem side of simple cache not a vector port");
        return memPort;
    } else if (if_name == "cpu_side" && idx < cpuPorts.size()) {
        // We should have already created all of the ports in the constructor
        return cpuPorts[idx];
    } else {
        // pass it along to our super class
        return ClockedObject::getPort(if_name, idx);
    }
}

void
SimpleCache::CPUSidePort::sendPacket(PacketPtr pkt)
{
    // Note: This flow control is very simple since the cache is blocking.

    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");

    // If we can't send the packet across the port, store it for later.
    DPRINTF(SimpleCache, "Sending %s to CPU\n", pkt->print());
    if (!sendTimingResp(pkt)) {
        DPRINTF(SimpleCache, "failed!\n");
        blockedPacket = pkt;
    }
}

AddrRangeList
SimpleCache::CPUSidePort::getAddrRanges() const
{
    return owner->getAddrRanges();
}

void
SimpleCache::CPUSidePort::trySendRetry()
{
    if (needRetry && blockedPacket == nullptr) {
        // Only send a retry if the port is now completely free
        needRetry = false;
        DPRINTF(SimpleCache, "Sending retry req.\n");
        sendRetryReq();
    }
}

void
SimpleCache::CPUSidePort::recvFunctional(PacketPtr pkt)
{
    // Just forward to the cache.
    return owner->handleFunctional(pkt);
}

bool
SimpleCache::CPUSidePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(SimpleCache, "Got request %s\n", pkt->print());

    if (blockedPacket || needRetry) {
        // The cache may not be able to send a reply if this is blocked
        DPRINTF(SimpleCache, "Request blocked\n");
        needRetry = true;
        return false;
    }
    // Just forward to the cache.
    if (!owner->handleRequest(pkt, id)) {
        DPRINTF(SimpleCache, "Request failed\n");
        // stalling
        needRetry = true;
        return false;
    } else {
        DPRINTF(SimpleCache, "Request succeeded\n");
        return true;
    }
}

void
SimpleCache::CPUSidePort::recvRespRetry()
{
    // We should have a blocked packet if this function is called.
    assert(blockedPacket != nullptr);

    // Grab the blocked packet.
    PacketPtr pkt = blockedPacket;
    blockedPacket = nullptr;

    DPRINTF(SimpleCache, "Retrying response pkt %s\n", pkt->print());
    // Try to resend it. It's possible that it fails again.
    sendPacket(pkt);

    // We may now be able to accept new packets
    trySendRetry();
}

void
SimpleCache::MemSidePort::sendPacket(PacketPtr pkt)
{
    // Note: This flow control is very simple since the cache is blocking.

    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");

    // If we can't send the packet across the port, store it for later.
    if (!sendTimingReq(pkt)) {
        blockedPacket = pkt;
    }
}

bool
SimpleCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    // Just forward to the cache.
    return owner->handleResponse(pkt);
}

void
SimpleCache::MemSidePort::recvReqRetry()
{
    // We should have a blocked packet if this function is called.
    assert(blockedPacket != nullptr);

    // Grab the blocked packet.
    PacketPtr pkt = blockedPacket;
    blockedPacket = nullptr;

    // Try to resend it. It's possible that it fails again.
    sendPacket(pkt);
}

void
SimpleCache::MemSidePort::recvRangeChange()
{
    owner->sendRangeChange();
}

bool
SimpleCache::handleRequest(PacketPtr pkt, int port_id)
{
    if (blocked) {
        // There is currently an outstanding request so we can't respond. Stall
        return false;
    }

    DPRINTF(SimpleCache, "Got request for addr %#x\n", pkt->getAddr());

    // This cache is now blocked waiting for the response to this packet.
    blocked = true;

    // Store the port for when we get the response
    assert(waitingPortId == -1);
    waitingPortId = port_id;

    // Schedule an event after cache access latency to actually access
    schedule(new EventFunctionWrapper([this, pkt]{ accessTiming(pkt); },
                                      name() + ".accessEvent", true),
             clockEdge(latency));

    return true;
}
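
// Illustrative note (not in the original source): clockEdge(latency)
// translates the configured latency in cycles into an absolute tick on one
// of this ClockedObject's clock edges. As a rough example, assuming a 1 GHz
// clock (1000-tick period) and latency = Cycles(2), a request arriving on
// the clock edge at tick 1000 would have its accessTiming event scheduled
// around tick 3000; the exact value depends on the object's clock domain
// and alignment to the next edge.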

bool
SimpleCache::handleResponse(PacketPtr pkt)
{
    assert(blocked);
    DPRINTF(SimpleCache, "Got response for addr %#x\n", pkt->getAddr());

    // For now assume that inserts are off of the critical path and don't count
    // for any added latency.
    insert(pkt);

    stats.missLatency.sample(curTick() - missTime);

    // If we had to upgrade the request packet to a full cache line, now we
    // can use that packet to construct the response.
    if (originalPacket != nullptr) {
        DPRINTF(SimpleCache, "Copying data from new packet to old\n");
        // We had to upgrade a previous packet. We can functionally deal with
        // the cache access now. It better be a hit.
        bool hit M5_VAR_USED = accessFunctional(originalPacket);
        panic_if(!hit, "Should always hit after inserting");
        originalPacket->makeResponse();
        delete pkt; // We may need to delay this, I'm not sure.
        pkt = originalPacket;
        originalPacket = nullptr;
    } // else, pkt contains the data it needs

    sendResponse(pkt);

    return true;
}
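
// Summary of the miss path implemented above (added for clarity, not in the
// original source):
//   1. recvTimingReq -> handleRequest: block the cache and remember which
//      CPU-side port the request came from.
//   2. accessTiming (after `latency` cycles): on a miss, forward the packet
//      to memory, upgrading it to a full block first if needed.
//   3. recvTimingResp -> handleResponse: insert the returned block, record
//      the miss latency, and rebuild the original response if the packet had
//      been upgraded.
//   4. sendResponse: unblock the cache, reply on the saved port, and give
//      the other CPU-side ports a chance to retry.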

void
SimpleCache::sendResponse(PacketPtr pkt)
{
    assert(blocked);
    DPRINTF(SimpleCache, "Sending resp for addr %#x\n", pkt->getAddr());

    int port = waitingPortId;

    // The packet is now done. We're about to put it in the port, no need for
    // this object to continue to stall.
    // We need to free the resource before sending the packet in case the CPU
    // tries to send another request immediately (e.g., in the same callchain).
    blocked = false;
    waitingPortId = -1;

    // Simply forward to the saved CPU-side port
    cpuPorts[port].sendPacket(pkt);

    // For each of the cpu ports, if it needs to send a retry, it should do it
    // now since this memory object may be unblocked now.
    for (auto& port : cpuPorts) {
        port.trySendRetry();
    }
}

void
SimpleCache::handleFunctional(PacketPtr pkt)
{
    if (accessFunctional(pkt)) {
        pkt->makeResponse();
    } else {
        memPort.sendFunctional(pkt);
    }
}

void
SimpleCache::accessTiming(PacketPtr pkt)
{
    bool hit = accessFunctional(pkt);

    DPRINTF(SimpleCache, "%s for packet: %s\n", hit ? "Hit" : "Miss",
            pkt->print());

    if (hit) {
        // Respond to the CPU side
        stats.hits++; // update stats
        DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), pkt->getSize());
        pkt->makeResponse();
        sendResponse(pkt);
    } else {
        stats.misses++; // update stats
        missTime = curTick();
        // Forward to the memory side.
        // We can't directly forward the packet unless it is exactly the size
        // of the cache line, and aligned. Check for that here.
        Addr addr = pkt->getAddr();
        Addr block_addr = pkt->getBlockAddr(blockSize);
        unsigned size = pkt->getSize();
        if (addr == block_addr && size == blockSize) {
            // Aligned and block size. We can just forward.
            DPRINTF(SimpleCache, "forwarding packet\n");
            memPort.sendPacket(pkt);
        } else {
            DPRINTF(SimpleCache, "Upgrading packet to block size\n");
            panic_if(addr - block_addr + size > blockSize,
                     "Cannot handle accesses that span multiple cache lines");
            // Unaligned access to one cache block
            assert(pkt->needsResponse());
            MemCmd cmd;
            if (pkt->isWrite() || pkt->isRead()) {
                // Read the data from memory to write into the block.
                // We'll write the data in the cache (i.e., a writeback cache)
                cmd = MemCmd::ReadReq;
            } else {
                panic("Unknown packet type in upgrade size");
            }

            // Create a new packet that is blockSize
            PacketPtr new_pkt = new Packet(pkt->req, cmd, blockSize);
            new_pkt->allocate();

            // Should now be block aligned
            assert(new_pkt->getAddr() == new_pkt->getBlockAddr(blockSize));

            // Save the old packet
            originalPacket = pkt;

            DPRINTF(SimpleCache, "forwarding packet\n");
            memPort.sendPacket(new_pkt);
        }
    }
}
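
// Worked example for the upgrade path above (illustrative numbers, not in
// the original source): with a 64-byte block size, a 4-byte read of address
// 0x1004 misses. block_addr is 0x1000, so the packet cannot be forwarded
// directly. A new 64-byte ReadReq for 0x1000 is allocated and sent to memory
// while the original packet is parked in originalPacket; when the fill
// returns, handleResponse() inserts the block and satisfies the original
// 4-byte read from the cache via accessFunctional().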

bool
SimpleCache::accessFunctional(PacketPtr pkt)
{
    Addr block_addr = pkt->getBlockAddr(blockSize);
    auto it = cacheStore.find(block_addr);
    if (it != cacheStore.end()) {
        if (pkt->isWrite()) {
            // Write the data into the block in the cache
            pkt->writeDataToBlock(it->second, blockSize);
        } else if (pkt->isRead()) {
            // Read the data out of the cache block into the packet
            pkt->setDataFromBlock(it->second, blockSize);
        } else {
            panic("Unknown packet type!");
        }
        return true;
    }
    return false;
}

void
SimpleCache::insert(PacketPtr pkt)
{
    // The packet should be aligned.
    assert(pkt->getAddr() == pkt->getBlockAddr(blockSize));
    // The address should not be in the cache
    assert(cacheStore.find(pkt->getAddr()) == cacheStore.end());
    // The pkt should be a response
    assert(pkt->isResponse());

    if (cacheStore.size() >= capacity) {
        // Select random thing to evict. This is a little convoluted since we
        // are using a std::unordered_map. See http://bit.ly/2hrnLP2
        int bucket, bucket_size;
        do {
            bucket = random_mt.random(0, (int)cacheStore.bucket_count() - 1);
        } while ( (bucket_size = cacheStore.bucket_size(bucket)) == 0 );
        auto block = std::next(cacheStore.begin(bucket),
                               random_mt.random(0, bucket_size - 1));

        DPRINTF(SimpleCache, "Removing addr %#x\n", block->first);

        // Write back the data.
        // Create a new request-packet pair
        RequestPtr req = std::make_shared<Request>(
            block->first, blockSize, 0, 0);

        PacketPtr new_pkt = new Packet(req, MemCmd::WritebackDirty, blockSize);
        new_pkt->dataDynamic(block->second); // This will be deleted later

        DPRINTF(SimpleCache, "Writing packet back %s\n", new_pkt->print());
        // Send the write to memory
        memPort.sendPacket(new_pkt);

        // Delete this entry
        cacheStore.erase(block->first);
    }

    DPRINTF(SimpleCache, "Inserting %s\n", pkt->print());
    DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), blockSize);

    // Allocate space for the cache block data
    uint8_t *data = new uint8_t[blockSize];

    // Insert the data and address into the cache store
    cacheStore[pkt->getAddr()] = data;

    // Write the data into the cache
    pkt->writeDataToBlock(data, blockSize);
}
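
// Note on the eviction above (added comment, not in the original source):
// std::unordered_map offers no direct way to pick a uniformly random
// element, so the code picks a random non-empty bucket and then a random
// entry within it. Entries that happen to sit in small buckets are slightly
// more likely to be chosen, which is an acceptable approximation for this
// simple random-replacement policy.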

AddrRangeList
SimpleCache::getAddrRanges() const
{
    DPRINTF(SimpleCache, "Sending new ranges\n");
    // Just use the same ranges as whatever is on the memory side.
    return memPort.getAddrRanges();
}

void
SimpleCache::sendRangeChange() const
{
    for (auto& port : cpuPorts) {
        port.sendRangeChange();
    }
}

SimpleCache::SimpleCacheStats::SimpleCacheStats(Stats::Group *parent)
    : Stats::Group(parent),
      ADD_STAT(hits, "Number of hits"),
      ADD_STAT(misses, "Number of misses"),
      ADD_STAT(missLatency, "Ticks for misses to the cache"),
      ADD_STAT(hitRatio, "The ratio of hits to the total "
               "accesses to the cache", hits / (hits + misses))
{
    missLatency.init(16); // number of buckets
}
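
// Illustrative note (not in the original source): hitRatio is a formula
// stat, so it is evaluated when statistics are dumped rather than when hits
// and misses are incremented. For example, 90 hits and 10 misses would be
// reported as a hitRatio of 0.9.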


SimpleCache*
SimpleCacheParams::create()
{
    return new SimpleCache(this);
}