/*
 * Copyright (c) 2011-2013, 2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Implementation of a memory-mapped bridge that connects a requestor
 * and a responder through a request and response queue.
 */

#include "mem/bridge.hh"

#include "base/trace.hh"
#include "debug/Bridge.hh"
#include "params/Bridge.hh"

Bridge::BridgeResponsePort::BridgeResponsePort(const std::string& _name,
                                               Bridge& _bridge,
                                               BridgeRequestPort& _memSidePort,
                                               Cycles _delay, int _resp_limit,
                                               std::vector<AddrRange> _ranges)
    : ResponsePort(_name, &_bridge), bridge(_bridge),
      memSidePort(_memSidePort), delay(_delay),
      ranges(_ranges.begin(), _ranges.end()),
      outstandingResponses(0), retryReq(false), respQueueLimit(_resp_limit),
      sendEvent([this]{ trySendTiming(); }, _name)
{
}

Bridge::BridgeRequestPort::BridgeRequestPort(const std::string& _name,
                                             Bridge& _bridge,
                                             BridgeResponsePort& _cpuSidePort,
                                             Cycles _delay, int _req_limit)
    : RequestPort(_name, &_bridge), bridge(_bridge),
      cpuSidePort(_cpuSidePort),
      delay(_delay), reqQueueLimit(_req_limit),
      sendEvent([this]{ trySendTiming(); }, _name)
{
}

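// The two ports are constructed with a reference to each other so that
// requests and responses can be handed across the bridge; both use the
// same crossing latency, with the delay parameter given in ticks and
// converted to cycles of this ClockedObject's clock.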
Bridge::Bridge(const Params &p)
    : ClockedObject(p),
      cpuSidePort(p.name + ".cpu_side_port", *this, memSidePort,
                  ticksToCycles(p.delay), p.resp_size, p.ranges),
      memSidePort(p.name + ".mem_side_port", *this, cpuSidePort,
                  ticksToCycles(p.delay), p.req_size)
{
}

Port &
Bridge::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side_port")
        return memSidePort;
    else if (if_name == "cpu_side_port")
        return cpuSidePort;
    else
        // pass it along to our super class
        return ClockedObject::getPort(if_name, idx);
}

void
Bridge::init()
{
    // make sure both sides of the bridge are connected
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Both ports of a bridge must be connected.\n");

    // notify the request side of our address ranges
    cpuSidePort.sendRangeChange();
}

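// The response queue is considered full once the number of reserved
// slots (outstandingResponses) reaches the configured resp_size limit;
// a slot is reserved for every accepted request that needs a response,
// not only for responses that have already arrived.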
bool
Bridge::BridgeResponsePort::respQueueFull() const
{
    return outstandingResponses == respQueueLimit;
}

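// The request queue limit bounds the number of packets waiting to be
// sent out of the memory-side port.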
bool
Bridge::BridgeRequestPort::reqQueueFull() const
{
    return transmitList.size() == reqQueueLimit;
}

bool
Bridge::BridgeRequestPort::recvTimingResp(PacketPtr pkt)
{
    // all checks are done when the request is accepted on the response
    // side, so we are guaranteed to have space for the response
    DPRINTF(Bridge, "recvTimingResp: %s addr 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    DPRINTF(Bridge, "Request queue size: %d\n", transmitList.size());

    // technically the packet only reaches us after the header delay,
    // and typically we also need to deserialise any payload (unless
    // the two sides of the bridge are synchronous)
    Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, bridge.clockEdge(delay) +
                                receive_delay);

    return true;
}

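// A timing request is either accepted, reserving response-queue space
// if a response is expected, or stalled until space frees up, in which
// case the requestor is sent a retry later via retryStalledReq() or
// trySendTiming().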
bool
Bridge::BridgeResponsePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // we should not get a new request after committing to retry the
    // current one, but unfortunately the CPU violates this rule, so
    // simply ignore it for now
    if (retryReq)
        return false;

    DPRINTF(Bridge, "Response queue size: %d outresp: %d\n",
            transmitList.size(), outstandingResponses);

    // if the request queue is full then there is no hope
    if (memSidePort.reqQueueFull()) {
        DPRINTF(Bridge, "Request queue full\n");
        retryReq = true;
    } else {
        // look at the response queue if we expect to see a response
        bool expects_response = pkt->needsResponse();
        if (expects_response) {
            if (respQueueFull()) {
                DPRINTF(Bridge, "Response queue full\n");
                retryReq = true;
            } else {
                // ok to send the request with space for the response
                DPRINTF(Bridge, "Reserving space for response\n");
                assert(outstandingResponses != respQueueLimit);
                ++outstandingResponses;

                // no need to set retryReq to false as this is already the
                // case
            }
        }

        if (!retryReq) {
            // technically the packet only reaches us after the header
            // delay, and typically we also need to deserialise any
            // payload (unless the two sides of the bridge are
            // synchronous)
            Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
            pkt->headerDelay = pkt->payloadDelay = 0;

            memSidePort.schedTimingReq(pkt, bridge.clockEdge(delay) +
                                       receive_delay);
        }
    }

    // remember that we are now stalling a packet and that we have to
    // tell the sending requestor to retry once space becomes available;
    // we make no distinction whether the stalling is due to the
    // request queue or the response queue being full
    return !retryReq;
}

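// Called by the memory-side port once it has drained a request from its
// queue, giving a requestor that was stalled on a full request queue
// another chance to issue its packet.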
void
Bridge::BridgeResponsePort::retryStalledReq()
{
    if (retryReq) {
        DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
        retryReq = false;
        sendRetryReq();
    }
}

void
Bridge::BridgeRequestPort::schedTimingReq(PacketPtr pkt, Tick when)
{
    // If we're about to put this packet at the head of the queue, we
    // need to schedule an event to do the transmit. Otherwise there
    // should already be an event scheduled for sending the head
    // packet.
    if (transmitList.empty()) {
        bridge.schedule(sendEvent, when);
    }

    assert(transmitList.size() != reqQueueLimit);

    transmitList.emplace_back(pkt, when);
}

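// Unlike the request path there is no limit check here: a response slot
// was already reserved when the corresponding request was accepted in
// recvTimingReq().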
void
Bridge::BridgeResponsePort::schedTimingResp(PacketPtr pkt, Tick when)
{
    // If we're about to put this packet at the head of the queue, we
    // need to schedule an event to do the transmit. Otherwise there
    // should already be an event scheduled for sending the head
    // packet.
    if (transmitList.empty()) {
        bridge.schedule(sendEvent, when);
    }

    transmitList.emplace_back(pkt, when);
}

void
Bridge::BridgeRequestPort::trySendTiming()
{
    assert(!transmitList.empty());

    DeferredPacket req = transmitList.front();

    assert(req.tick <= curTick());

    PacketPtr pkt = req.pkt;

    DPRINTF(Bridge, "trySend request addr 0x%x, queue size %d\n",
            pkt->getAddr(), transmitList.size());

    if (sendTimingReq(pkt)) {
        // send successful
        transmitList.pop_front();
        DPRINTF(Bridge, "trySend request successful\n");

        // If there are more packets to send, schedule event to try again.
        if (!transmitList.empty()) {
            DeferredPacket next_req = transmitList.front();
            DPRINTF(Bridge, "Scheduling next send\n");
            bridge.schedule(sendEvent, std::max(next_req.tick,
                                                bridge.clockEdge()));
        }

        // if we have stalled a request due to a full request queue,
        // then send a retry at this point; note that if the request
        // we stalled was waiting for the response queue rather than
        // the request queue, we might stall it again
        cpuSidePort.retryStalledReq();
    }

    // if the send failed, then we try again once we receive a retry,
    // and therefore there is no need to take any action
}

void
Bridge::BridgeResponsePort::trySendTiming()
{
    assert(!transmitList.empty());

    DeferredPacket resp = transmitList.front();

    assert(resp.tick <= curTick());

    PacketPtr pkt = resp.pkt;

    DPRINTF(Bridge, "trySend response addr 0x%x, outstanding %d\n",
            pkt->getAddr(), outstandingResponses);

    if (sendTimingResp(pkt)) {
        // send successful
        transmitList.pop_front();
        DPRINTF(Bridge, "trySend response successful\n");

        assert(outstandingResponses != 0);
        --outstandingResponses;

        // If there are more packets to send, schedule event to try again.
        if (!transmitList.empty()) {
            DeferredPacket next_resp = transmitList.front();
            DPRINTF(Bridge, "Scheduling next send\n");
            bridge.schedule(sendEvent, std::max(next_resp.tick,
                                                bridge.clockEdge()));
        }

        // if there is space in the request queue and we were stalling
        // a request, it will definitely be possible to accept it now
        // since there is guaranteed space in the response queue
        if (!memSidePort.reqQueueFull() && retryReq) {
            DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
            retryReq = false;
            sendRetryReq();
        }
    }

    // if the send failed, then we try again once we receive a retry,
    // and therefore there is no need to take any action
}

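// A retry from a peer port means the previously rejected packet at the
// head of the transmit list can be attempted again.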
void
Bridge::BridgeRequestPort::recvReqRetry()
{
    trySendTiming();
}

void
Bridge::BridgeResponsePort::recvRespRetry()
{
    trySendTiming();
}

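// Atomic accesses are not queued; the full bridge delay is simply added
// to the latency reported by the memory-side port.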
Tick
Bridge::BridgeResponsePort::recvAtomic(PacketPtr pkt)
{
    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    return delay * bridge.clockPeriod() + memSidePort.sendAtomic(pkt);
}

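// Functional accesses first check the packets buffered in both queues so
// that in-flight data is visible, and are only forwarded downstream if
// they remain unsatisfied.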
void
Bridge::BridgeResponsePort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(name());

    // check the response queue
    for (auto i = transmitList.begin(); i != transmitList.end(); ++i) {
        if (pkt->trySatisfyFunctional((*i).pkt)) {
            pkt->makeResponse();
            return;
        }
    }

    // also check the request port's request queue
    if (memSidePort.trySatisfyFunctional(pkt)) {
        return;
    }

    pkt->popLabel();

    // fall through if pkt still not satisfied
    memSidePort.sendFunctional(pkt);
}

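// Walk the request queue and let the functional packet check itself
// against every deferred packet; return whether it was satisfied.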
bool
Bridge::BridgeRequestPort::trySatisfyFunctional(PacketPtr pkt)
{
    bool found = false;
    auto i = transmitList.begin();

    while (i != transmitList.end() && !found) {
        if (pkt->trySatisfyFunctional((*i).pkt)) {
            pkt->makeResponse();
            found = true;
        }
        ++i;
    }

    return found;
}

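// The address ranges advertised to the CPU side are the ranges the
// bridge was configured with, not those of the downstream memory system.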
AddrRangeList
Bridge::BridgeResponsePort::getAddrRanges() const
{
    return ranges;
}