/*
 * Copyright (c) 2011-2013, 2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Implementation of a memory-mapped bridge that connects a master
 * and a slave through a request and response queue.
 */

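/**
 * A bridge is normally instantiated and wired up from a Python
 * configuration script. The sketch below is illustrative only and assumes
 * the standard gem5 config API of this era; the parameter values and the
 * crossbar names (cpu_side_xbar, mem_side_xbar) are placeholders:
 *
 *   bridge = Bridge(delay='50ns', req_size=16, resp_size=16,
 *                   ranges=[AddrRange('1GB')])
 *   bridge.slave = cpu_side_xbar.master
 *   bridge.master = mem_side_xbar.slave
 */
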
#include "mem/bridge.hh"

#include "base/trace.hh"
#include "debug/Bridge.hh"
#include "params/Bridge.hh"

Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
                                         Bridge& _bridge,
                                         BridgeMasterPort& _masterPort,
                                         Cycles _delay, int _resp_limit,
                                         std::vector<AddrRange> _ranges)
    : SlavePort(_name, &_bridge), bridge(_bridge), masterPort(_masterPort),
      delay(_delay), ranges(_ranges.begin(), _ranges.end()),
      outstandingResponses(0), retryReq(false), respQueueLimit(_resp_limit),
      sendEvent([this]{ trySendTiming(); }, _name)
{
}

Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,
                                           Bridge& _bridge,
                                           BridgeSlavePort& _slavePort,
                                           Cycles _delay, int _req_limit)
    : MasterPort(_name, &_bridge), bridge(_bridge), slavePort(_slavePort),
      delay(_delay), reqQueueLimit(_req_limit),
      sendEvent([this]{ trySendTiming(); }, _name)
{
}

Bridge::Bridge(Params *p)
    : ClockedObject(p),
      slavePort(p->name + ".slave", *this, masterPort,
                ticksToCycles(p->delay), p->resp_size, p->ranges),
      masterPort(p->name + ".master", *this, slavePort,
                 ticksToCycles(p->delay), p->req_size)
{
}

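// resolve the externally visible port names to the internal ports,
// deferring anything we do not recognise to the base ClockedObject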
Port &
Bridge::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "master")
        return masterPort;
    else if (if_name == "slave")
        return slavePort;
    else
        // pass it along to our super class
        return ClockedObject::getPort(if_name, idx);
}

void
Bridge::init()
{
    // make sure both sides of the bridge are connected
    if (!slavePort.isConnected() || !masterPort.isConnected())
        fatal("Both ports of a bridge must be connected.\n");

    // notify the master side of our address ranges
    slavePort.sendRangeChange();
}

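// the response queue is full once the number of reserved slots reaches
// the configured limit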
bool
Bridge::BridgeSlavePort::respQueueFull() const
{
    return outstandingResponses == respQueueLimit;
}

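// the request queue is full once the number of deferred packets reaches
// the configured limit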
bool
Bridge::BridgeMasterPort::reqQueueFull() const
{
    return transmitList.size() == reqQueueLimit;
}

bool
Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)
{
    // all checks are done when the request is accepted on the slave
    // side, so we are guaranteed to have space for the response
    DPRINTF(Bridge, "recvTimingResp: %s addr 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    DPRINTF(Bridge, "Request queue size: %d\n", transmitList.size());

    // technically the packet only reaches us after the header delay,
    // and typically we also need to deserialise any payload (unless
    // the two sides of the bridge are synchronous)
    Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
    pkt->headerDelay = pkt->payloadDelay = 0;

    slavePort.schedTimingResp(pkt, bridge.clockEdge(delay) +
                              receive_delay);

    return true;
}

bool
Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // we should not get a new request after committing to retry the
    // current one, but unfortunately the CPU violates this rule, so
    // simply ignore it for now
    if (retryReq)
        return false;

    DPRINTF(Bridge, "Response queue size: %d outresp: %d\n",
            transmitList.size(), outstandingResponses);

    // if the request queue is full then there is no hope
    if (masterPort.reqQueueFull()) {
        DPRINTF(Bridge, "Request queue full\n");
        retryReq = true;
    } else {
        // look at the response queue if we expect to see a response
        bool expects_response = pkt->needsResponse();
        if (expects_response) {
            if (respQueueFull()) {
                DPRINTF(Bridge, "Response queue full\n");
                retryReq = true;
            } else {
                // ok to send the request with space for the response
                DPRINTF(Bridge, "Reserving space for response\n");
                assert(outstandingResponses != respQueueLimit);
                ++outstandingResponses;

                // no need to set retryReq to false as this is already the
                // case
            }
        }

        if (!retryReq) {
            // technically the packet only reaches us after the header
            // delay, and typically we also need to deserialise any
            // payload (unless the two sides of the bridge are
            // synchronous)
            Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
            pkt->headerDelay = pkt->payloadDelay = 0;

            masterPort.schedTimingReq(pkt, bridge.clockEdge(delay) +
                                      receive_delay);
        }
    }

    // remember that we are now stalling a packet and that we have to
    // tell the sending master to retry once space becomes available;
    // we make no distinction whether the stalling is due to the
    // request queue or response queue being full
    return !retryReq;
}

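// called by the master port once request-queue space has freed up again;
// if a request was stalled, clear the flag and ask the sender to retry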
void
Bridge::BridgeSlavePort::retryStalledReq()
{
    if (retryReq) {
        DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
        retryReq = false;
        sendRetryReq();
    }
}

void
Bridge::BridgeMasterPort::schedTimingReq(PacketPtr pkt, Tick when)
{
    // If we're about to put this packet at the head of the queue, we
    // need to schedule an event to do the transmit. Otherwise there
    // should already be an event scheduled for sending the head
    // packet.
    if (transmitList.empty()) {
        bridge.schedule(sendEvent, when);
    }

    assert(transmitList.size() != reqQueueLimit);

    transmitList.emplace_back(pkt, when);
}


void
Bridge::BridgeSlavePort::schedTimingResp(PacketPtr pkt, Tick when)
{
    // If we're about to put this packet at the head of the queue, we
    // need to schedule an event to do the transmit. Otherwise there
    // should already be an event scheduled for sending the head
    // packet.
    if (transmitList.empty()) {
        bridge.schedule(sendEvent, when);
    }

    transmitList.emplace_back(pkt, when);
}

void
Bridge::BridgeMasterPort::trySendTiming()
{
    assert(!transmitList.empty());

    DeferredPacket req = transmitList.front();

    assert(req.tick <= curTick());

    PacketPtr pkt = req.pkt;

    DPRINTF(Bridge, "trySend request addr 0x%x, queue size %d\n",
            pkt->getAddr(), transmitList.size());

    if (sendTimingReq(pkt)) {
        // send successful
        transmitList.pop_front();
        DPRINTF(Bridge, "trySend request successful\n");

        // If there are more packets to send, schedule event to try again.
        if (!transmitList.empty()) {
            DeferredPacket next_req = transmitList.front();
            DPRINTF(Bridge, "Scheduling next send\n");
            bridge.schedule(sendEvent, std::max(next_req.tick,
                                                bridge.clockEdge()));
        }

        // if we have stalled a request due to a full request queue,
        // then send a retry at this point; also note that if the
        // request we stalled was waiting for the response queue
        // rather than the request queue we might stall it again
        slavePort.retryStalledReq();
    }

    // if the send failed, then we try again once we receive a retry,
    // and therefore there is no need to take any action
}

void
Bridge::BridgeSlavePort::trySendTiming()
{
    assert(!transmitList.empty());

    DeferredPacket resp = transmitList.front();

    assert(resp.tick <= curTick());

    PacketPtr pkt = resp.pkt;

    DPRINTF(Bridge, "trySend response addr 0x%x, outstanding %d\n",
            pkt->getAddr(), outstandingResponses);

    if (sendTimingResp(pkt)) {
        // send successful
        transmitList.pop_front();
        DPRINTF(Bridge, "trySend response successful\n");

        assert(outstandingResponses != 0);
        --outstandingResponses;

        // If there are more packets to send, schedule event to try again.
        if (!transmitList.empty()) {
            DeferredPacket next_resp = transmitList.front();
            DPRINTF(Bridge, "Scheduling next send\n");
            bridge.schedule(sendEvent, std::max(next_resp.tick,
                                                bridge.clockEdge()));
        }

        // if there is space in the request queue and we were stalling
        // a request, it will definitely be possible to accept it now
        // since there is guaranteed space in the response queue
        if (!masterPort.reqQueueFull() && retryReq) {
            DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
            retryReq = false;
            sendRetryReq();
        }
    }

    // if the send failed, then we try again once we receive a retry,
    // and therefore there is no need to take any action
}

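// the downstream slave has asked for a retry, so attempt to resend the
// request at the head of the queue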
void
Bridge::BridgeMasterPort::recvReqRetry()
{
    trySendTiming();
}

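// the upstream master has asked for a retry, so attempt to resend the
// response at the head of the queue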
void
Bridge::BridgeSlavePort::recvRespRetry()
{
    trySendTiming();
}

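// in atomic mode the bridge simply adds its configured latency
// (converted from cycles to ticks) to the latency of the downstream
// access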
Tick
Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt)
{
    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    return delay * bridge.clockPeriod() + masterPort.sendAtomic(pkt);
}

void
Bridge::BridgeSlavePort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(name());

    // check the response queue
    for (auto i = transmitList.begin(); i != transmitList.end(); ++i) {
        if (pkt->trySatisfyFunctional((*i).pkt)) {
            pkt->makeResponse();
            return;
        }
    }

    // also check the master port's request queue
    if (masterPort.trySatisfyFunctional(pkt)) {
        return;
    }

    pkt->popLabel();

    // fall through if pkt still not satisfied
    masterPort.sendFunctional(pkt);
}

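// check the request packets queued on the master port against a
// functional access, turning the packet into a response if one of them
// satisfies it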
bool
Bridge::BridgeMasterPort::trySatisfyFunctional(PacketPtr pkt)
{
    bool found = false;
    auto i = transmitList.begin();

    while (i != transmitList.end() && !found) {
        if (pkt->trySatisfyFunctional((*i).pkt)) {
            pkt->makeResponse();
            found = true;
        }
        ++i;
    }

    return found;
}

AddrRangeList
Bridge::BridgeSlavePort::getAddrRanges() const
{
    return ranges;
}

Bridge *
BridgeParams::create()
{
    return new Bridge(this);
}