// mem-cache: Fix setting prefetch bit
// [gem5.git] / src / mem / xbar.cc
1 /*
2 * Copyright (c) 2011-2015, 2018-2020 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2006 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41 /**
42 * @file
43 * Definition of a crossbar object.
44 */
45
46 #include "mem/xbar.hh"
47
48 #include "base/logging.hh"
49 #include "base/trace.hh"
50 #include "debug/AddrRanges.hh"
51 #include "debug/Drain.hh"
52 #include "debug/XBar.hh"
53
// Construct the crossbar base: copy the timing parameters from the
// Python-visible params object, size the per-port bookkeeping for address
// range reporting, and register the statistics with this object as parent.
BaseXBar::BaseXBar(const BaseXBarParams &p)
    : ClockedObject(p),
      frontendLatency(p.frontend_latency),
      forwardLatency(p.forward_latency),
      responseLatency(p.response_latency),
      headerLatency(p.header_latency),
      width(p.width),
      // one flag per connected memory-side port (including a potential
      // default port), recording whether that port has reported its
      // address ranges yet
      gotAddrRanges(p.port_default_connection_count +
                          p.port_mem_side_ports_connection_count, false),
      gotAllAddrRanges(false), defaultPortID(InvalidPortID),
      useDefaultRange(p.use_default_range),

      transDist(this, "trans_dist", "Transaction distribution"),
      // NOTE(review): the "(bytes)" suffix on pkt_count looks like a
      // copy-paste from pkt_size -- confirm before changing the stat text
      pktCount(this, "pkt_count",
               "Packet count per connected requestor and responder (bytes)"),
      pktSize(this, "pkt_size", "Cumulative packet size per connected "
              "requestor and responder (bytes)")
{
}
73
74 BaseXBar::~BaseXBar()
75 {
76 for (auto port: memSidePorts)
77 delete port;
78
79 for (auto port: cpuSidePorts)
80 delete port;
81 }
82
83 Port &
84 BaseXBar::getPort(const std::string &if_name, PortID idx)
85 {
86 if (if_name == "mem_side_ports" && idx < memSidePorts.size()) {
87 // the memory-side ports index translates directly to the vector
88 // position
89 return *memSidePorts[idx];
90 } else if (if_name == "default") {
91 return *memSidePorts[defaultPortID];
92 } else if (if_name == "cpu_side_ports" && idx < cpuSidePorts.size()) {
93 // the CPU-side ports index translates directly to the vector position
94 return *cpuSidePorts[idx];
95 } else {
96 return ClockedObject::getPort(if_name, idx);
97 }
98 }
99
100 void
101 BaseXBar::calcPacketTiming(PacketPtr pkt, Tick header_delay)
102 {
103 // the crossbar will be called at a time that is not necessarily
104 // coinciding with its own clock, so start by determining how long
105 // until the next clock edge (could be zero)
106 Tick offset = clockEdge() - curTick();
107
108 // the header delay depends on the path through the crossbar, and
109 // we therefore rely on the caller to provide the actual
110 // value
111 pkt->headerDelay += offset + header_delay;
112
113 // note that we add the header delay to the existing value, and
114 // align it to the crossbar clock
115
116 // do a quick sanity check to ensure the timings are not being
117 // ignored, note that this specific value may cause problems for
118 // slower interconnects
119 panic_if(pkt->headerDelay > SimClock::Int::us,
120 "Encountered header delay exceeding 1 us\n");
121
122 if (pkt->hasData()) {
123 // the payloadDelay takes into account the relative time to
124 // deliver the payload of the packet, after the header delay,
125 // we take the maximum since the payload delay could already
126 // be longer than what this parcitular crossbar enforces.
127 pkt->payloadDelay = std::max<Tick>(pkt->payloadDelay,
128 divCeil(pkt->getSize(), width) *
129 clockPeriod());
130 }
131
132 // the payload delay is not paying for the clock offset as that is
133 // already done using the header delay, and the payload delay is
134 // also used to determine how long the crossbar layer is busy and
135 // thus regulates throughput
136 }
137
138 template <typename SrcType, typename DstType>
139 BaseXBar::Layer<SrcType, DstType>::Layer(DstType& _port, BaseXBar& _xbar,
140 const std::string& _name) :
141 Stats::Group(&_xbar, _name.c_str()),
142 port(_port), xbar(_xbar), _name(xbar.name() + "." + _name), state(IDLE),
143 waitingForPeer(NULL), releaseEvent([this]{ releaseLayer(); }, name()),
144 ADD_STAT(occupancy, "Layer occupancy (ticks)"),
145 ADD_STAT(utilization, "Layer utilization (%)")
146 {
147 occupancy
148 .flags(Stats::nozero);
149
150 utilization
151 .precision(1)
152 .flags(Stats::nozero);
153
154 utilization = 100 * occupancy / simTicks;
155 }
156
157 template <typename SrcType, typename DstType>
158 void BaseXBar::Layer<SrcType, DstType>::occupyLayer(Tick until)
159 {
160 // ensure the state is busy at this point, as the layer should
161 // transition from idle as soon as it has decided to forward the
162 // packet to prevent any follow-on calls to sendTiming seeing an
163 // unoccupied layer
164 assert(state == BUSY);
165
166 // until should never be 0 as express snoops never occupy the layer
167 assert(until != 0);
168 xbar.schedule(releaseEvent, until);
169
170 // account for the occupied ticks
171 occupancy += until - curTick();
172
173 DPRINTF(BaseXBar, "The crossbar layer is now busy from tick %d to %d\n",
174 curTick(), until);
175 }
176
177 template <typename SrcType, typename DstType>
178 bool
179 BaseXBar::Layer<SrcType, DstType>::tryTiming(SrcType* src_port)
180 {
181 // if we are in the retry state, we will not see anything but the
182 // retrying port (or in the case of the snoop ports the snoop
183 // response port that mirrors the actual CPU-side port) as we leave
184 // this state again in zero time if the peer does not immediately
185 // call the layer when receiving the retry
186
187 // first we see if the layer is busy, next we check if the
188 // destination port is already engaged in a transaction waiting
189 // for a retry from the peer
190 if (state == BUSY || waitingForPeer != NULL) {
191 // the port should not be waiting already
192 assert(std::find(waitingForLayer.begin(), waitingForLayer.end(),
193 src_port) == waitingForLayer.end());
194
195 // put the port at the end of the retry list waiting for the
196 // layer to be freed up (and in the case of a busy peer, for
197 // that transaction to go through, and then the layer to free
198 // up)
199 waitingForLayer.push_back(src_port);
200 return false;
201 }
202
203 state = BUSY;
204
205 return true;
206 }
207
208 template <typename SrcType, typename DstType>
209 void
210 BaseXBar::Layer<SrcType, DstType>::succeededTiming(Tick busy_time)
211 {
212 // we should have gone from idle or retry to busy in the tryTiming
213 // test
214 assert(state == BUSY);
215
216 // occupy the layer accordingly
217 occupyLayer(busy_time);
218 }
219
220 template <typename SrcType, typename DstType>
221 void
222 BaseXBar::Layer<SrcType, DstType>::failedTiming(SrcType* src_port,
223 Tick busy_time)
224 {
225 // ensure no one got in between and tried to send something to
226 // this port
227 assert(waitingForPeer == NULL);
228
229 // if the source port is the current retrying one or not, we have
230 // failed in forwarding and should track that we are now waiting
231 // for the peer to send a retry
232 waitingForPeer = src_port;
233
234 // we should have gone from idle or retry to busy in the tryTiming
235 // test
236 assert(state == BUSY);
237
238 // occupy the bus accordingly
239 occupyLayer(busy_time);
240 }
241
242 template <typename SrcType, typename DstType>
243 void
244 BaseXBar::Layer<SrcType, DstType>::releaseLayer()
245 {
246 // releasing the bus means we should now be idle
247 assert(state == BUSY);
248 assert(!releaseEvent.scheduled());
249
250 // update the state
251 state = IDLE;
252
253 // bus layer is now idle, so if someone is waiting we can retry
254 if (!waitingForLayer.empty()) {
255 // there is no point in sending a retry if someone is still
256 // waiting for the peer
257 if (waitingForPeer == NULL)
258 retryWaiting();
259 } else if (waitingForPeer == NULL && drainState() == DrainState::Draining) {
260 DPRINTF(Drain, "Crossbar done draining, signaling drain manager\n");
261 //If we weren't able to drain before, do it now.
262 signalDrainDone();
263 }
264 }
265
266 template <typename SrcType, typename DstType>
267 void
268 BaseXBar::Layer<SrcType, DstType>::retryWaiting()
269 {
270 // this should never be called with no one waiting
271 assert(!waitingForLayer.empty());
272
273 // we always go to retrying from idle
274 assert(state == IDLE);
275
276 // update the state
277 state = RETRY;
278
279 // set the retrying port to the front of the retry list and pop it
280 // off the list
281 SrcType* retryingPort = waitingForLayer.front();
282 waitingForLayer.pop_front();
283
284 // tell the port to retry, which in some cases ends up calling the
285 // layer again
286 sendRetry(retryingPort);
287
288 // If the layer is still in the retry state, sendTiming wasn't
289 // called in zero time (e.g. the cache does this when a writeback
290 // is squashed)
291 if (state == RETRY) {
292 // update the state to busy and reset the retrying port, we
293 // have done our bit and sent the retry
294 state = BUSY;
295
296 // occupy the crossbar layer until the next clock edge
297 occupyLayer(xbar.clockEdge());
298 }
299 }
300
301 template <typename SrcType, typename DstType>
302 void
303 BaseXBar::Layer<SrcType, DstType>::recvRetry()
304 {
305 // we should never get a retry without having failed to forward
306 // something to this port
307 assert(waitingForPeer != NULL);
308
309 // add the port where the failed packet originated to the front of
310 // the waiting ports for the layer, this allows us to call retry
311 // on the port immediately if the crossbar layer is idle
312 waitingForLayer.push_front(waitingForPeer);
313
314 // we are no longer waiting for the peer
315 waitingForPeer = NULL;
316
317 // if the layer is idle, retry this port straight away, if we
318 // are busy, then simply let the port wait for its turn
319 if (state == IDLE) {
320 retryWaiting();
321 } else {
322 assert(state == BUSY);
323 }
324 }
325
326 PortID
327 BaseXBar::findPort(AddrRange addr_range)
328 {
329 // we should never see any address lookups before we've got the
330 // ranges of all connected CPU-side-port modules
331 assert(gotAllAddrRanges);
332
333 // Check the address map interval tree
334 auto i = portMap.contains(addr_range);
335 if (i != portMap.end()) {
336 return i->second;
337 }
338
339 // Check if this matches the default range
340 if (useDefaultRange) {
341 if (addr_range.isSubset(defaultRange)) {
342 DPRINTF(AddrRanges, " found addr %s on default\n",
343 addr_range.to_string());
344 return defaultPortID;
345 }
346 } else if (defaultPortID != InvalidPortID) {
347 DPRINTF(AddrRanges, "Unable to find destination for %s, "
348 "will use default port\n", addr_range.to_string());
349 return defaultPortID;
350 }
351
352 // we should use the range for the default port and it did not
353 // match, or the default port is not set
354 fatal("Unable to find destination for %s on %s\n", addr_range.to_string(),
355 name());
356 }
357
/**
 * Function called by a memory-side port when the module downstream of it
 * announces a change in its address ranges. Records the port's ranges,
 * and once all connected modules have reported, aggregates the ranges
 * (merging interleaved ones) and propagates the change upstream through
 * the CPU-side ports.
 */
void
BaseXBar::recvRangeChange(PortID mem_side_port_id)
{
    // the peer of our memory-side port is the CPU-side (response) port
    // of the downstream module, hence the wording of this message
    DPRINTF(AddrRanges, "Received range change from cpu_side_ports %s\n",
            memSidePorts[mem_side_port_id]->getPeer());

    // remember that we got a range from this memory-side port and thus the
    // connected CPU-side-port module
    gotAddrRanges[mem_side_port_id] = true;

    // update the global flag
    if (!gotAllAddrRanges) {
        // take a logical AND of all the ports and see if we got
        // ranges from everyone
        gotAllAddrRanges = true;
        std::vector<bool>::const_iterator r = gotAddrRanges.begin();
        while (gotAllAddrRanges && r != gotAddrRanges.end()) {
            gotAllAddrRanges &= *r++;
        }
        if (gotAllAddrRanges)
            DPRINTF(AddrRanges, "Got address ranges from all responders\n");
    }

    // note that we could get the range from the default port at any
    // point in time, and we cannot assume that the default range is
    // set before the other ones are, so we do additional checks once
    // all ranges are provided
    if (mem_side_port_id == defaultPortID) {
        // only update if we are indeed checking ranges for the
        // default port since the port might not have a valid range
        // otherwise
        if (useDefaultRange) {
            AddrRangeList ranges = memSidePorts[mem_side_port_id]->
                getAddrRanges();

            // the default port must expose exactly one range
            if (ranges.size() != 1)
                fatal("Crossbar %s may only have a single default range",
                      name());

            defaultRange = ranges.front();
        }
    } else {
        // the ports are allowed to update their address ranges
        // dynamically, so remove any existing entries
        if (gotAddrRanges[mem_side_port_id]) {
            for (auto p = portMap.begin(); p != portMap.end(); ) {
                if (p->second == mem_side_port_id)
                    // erasing invalidates the iterator, so advance it
                    // before the deletion takes place
                    portMap.erase(p++);
                else
                    p++;
            }
        }

        AddrRangeList ranges = memSidePorts[mem_side_port_id]->
            getAddrRanges();

        for (const auto& r: ranges) {
            DPRINTF(AddrRanges, "Adding range %s for id %d\n",
                    r.to_string(), mem_side_port_id);
            // a failed insert means the range overlaps one that is
            // already claimed by a different port
            if (portMap.insert(r, mem_side_port_id) == portMap.end()) {
                PortID conflict_id = portMap.intersects(r)->second;
                fatal("%s has two ports responding within range "
                      "%s:\n\t%s\n\t%s\n",
                      name(),
                      r.to_string(),
                      memSidePorts[mem_side_port_id]->getPeer(),
                      memSidePorts[conflict_id]->getPeer());
            }
        }
    }

    // if we have received ranges from all our neighbouring CPU-side-port
    // modules, go ahead and tell our connected memory-side-port modules in
    // turn, this effectively assumes a tree structure of the system
    if (gotAllAddrRanges) {
        DPRINTF(AddrRanges, "Aggregating address ranges\n");
        xbarRanges.clear();

        // start out with the default range
        if (useDefaultRange) {
            if (!gotAddrRanges[defaultPortID])
                fatal("Crossbar %s uses default range, but none provided",
                      name());

            xbarRanges.push_back(defaultRange);
            DPRINTF(AddrRanges, "-- Adding default %s\n",
                    defaultRange.to_string());
        }

        // merge all interleaved ranges and add any range that is not
        // a subset of the default range
        std::vector<AddrRange> intlv_ranges;
        for (const auto& r: portMap) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    DPRINTF(AddrRanges, "-- Merging range from %d ranges\n",
                            intlv_ranges.size());
                    AddrRange merged_range(intlv_ranges);
                    // next decide if we keep the merged range or not
                    if (!(useDefaultRange &&
                          merged_range.isSubset(defaultRange))) {
                        xbarRanges.push_back(merged_range);
                        DPRINTF(AddrRanges, "-- Adding merged range %s\n",
                                merged_range.to_string());
                    }
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range if not a subset of the default
                if (!(useDefaultRange &&
                      r.first.isSubset(defaultRange))) {
                    xbarRanges.push_back(r.first);
                    DPRINTF(AddrRanges, "-- Adding range %s\n",
                            r.first.to_string());
                }
            }
        }

        // if there is still interleaved ranges waiting to be merged,
        // go ahead and do it
        if (!intlv_ranges.empty()) {
            DPRINTF(AddrRanges, "-- Merging range from %d ranges\n",
                    intlv_ranges.size());
            AddrRange merged_range(intlv_ranges);
            if (!(useDefaultRange && merged_range.isSubset(defaultRange))) {
                xbarRanges.push_back(merged_range);
                DPRINTF(AddrRanges, "-- Adding merged range %s\n",
                        merged_range.to_string());
            }
        }

        // also check that no range partially intersects with the
        // default range, this has to be done after all ranges are set
        // as there are no guarantees for when the default range is
        // update with respect to the other ones
        if (useDefaultRange) {
            for (const auto& r: xbarRanges) {
                // see if the new range is partially
                // overlapping the default range
                if (r.intersects(defaultRange) &&
                    !r.isSubset(defaultRange))
                    fatal("Range %s intersects the " \
                          "default range of %s but is not a " \
                          "subset\n", r.to_string(), name());
            }
        }

        // tell everybody connected to our CPU-side ports (the upstream
        // requestors) that our address ranges have changed
        for (const auto& port: cpuSidePorts)
            port->sendRangeChange();
    }
}
520
521 AddrRangeList
522 BaseXBar::getAddrRanges() const
523 {
524 // we should never be asked without first having sent a range
525 // change, and the latter is only done once we have all the ranges
526 // of the connected devices
527 assert(gotAllAddrRanges);
528
529 // at the moment, this never happens, as there are no cycles in
530 // the range queries and no devices on the memory side of a crossbar
531 // (CPU, cache, bridge etc) actually care about the ranges of the
532 // ports they are connected to
533
534 DPRINTF(AddrRanges, "Received address range request\n");
535
536 return xbarRanges;
537 }
538
539 void
540 BaseXBar::regStats()
541 {
542 ClockedObject::regStats();
543
544 using namespace Stats;
545
546 transDist
547 .init(MemCmd::NUM_MEM_CMDS)
548 .flags(nozero);
549
550 // get the string representation of the commands
551 for (int i = 0; i < MemCmd::NUM_MEM_CMDS; i++) {
552 MemCmd cmd(i);
553 const std::string &cstr = cmd.toString();
554 transDist.subname(i, cstr);
555 }
556
557 pktCount
558 .init(cpuSidePorts.size(), memSidePorts.size())
559 .flags(total | nozero | nonan);
560
561 pktSize
562 .init(cpuSidePorts.size(), memSidePorts.size())
563 .flags(total | nozero | nonan);
564
565 // both the packet count and total size are two-dimensional
566 // vectors, indexed by CPU-side port id and memory-side port id, thus the
567 // neighbouring memory-side ports and CPU-side ports, they do not
568 // differentiate what came from the memory-side ports and was forwarded to
569 // the CPU-side ports (requests and snoop responses) and what came from
570 // the CPU-side ports and was forwarded to the memory-side ports (responses
571 // and snoop requests)
572 for (int i = 0; i < cpuSidePorts.size(); i++) {
573 pktCount.subname(i, cpuSidePorts[i]->getPeer().name());
574 pktSize.subname(i, cpuSidePorts[i]->getPeer().name());
575 for (int j = 0; j < memSidePorts.size(); j++) {
576 pktCount.ysubname(j, memSidePorts[j]->getPeer().name());
577 pktSize.ysubname(j, memSidePorts[j]->getPeer().name());
578 }
579 }
580 }
581
582 template <typename SrcType, typename DstType>
583 DrainState
584 BaseXBar::Layer<SrcType, DstType>::drain()
585 {
586 //We should check that we're not "doing" anything, and that noone is
587 //waiting. We might be idle but have someone waiting if the device we
588 //contacted for a retry didn't actually retry.
589 if (state != IDLE) {
590 DPRINTF(Drain, "Crossbar not drained\n");
591 return DrainState::Draining;
592 } else {
593 return DrainState::Drained;
594 }
595 }
596
/**
 * Crossbar layer template instantiations. Could be removed with _impl.hh
 * file, but since there are only two given options (RequestPort and
 * ResponsePort) it seems a bit excessive at this point.
 *
 * Layer<Src, Dst> arbitrates between source ports of type Src for a
 * single destination port of type Dst (the _port passed to the ctor).
 */
template class BaseXBar::Layer<ResponsePort, RequestPort>;
template class BaseXBar::Layer<RequestPort, ResponsePort>;