misc: Replaced master/slave terminology
[gem5.git] / src / mem / qos / mem_sink.cc
1 /*
2 * Copyright (c) 2018-2020 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Author: Matteo Andreozzi
38 */
39
40 #include "debug/Drain.hh"
41 #include "debug/QOS.hh"
42 #include "mem_sink.hh"
43 #include "params/QoSMemSinkInterface.hh"
44 #include "sim/system.hh"
45
46 namespace QoS {
47
48 MemSinkCtrl::MemSinkCtrl(const QoSMemSinkCtrlParams* p)
49 : MemCtrl(p), requestLatency(p->request_latency),
50 responseLatency(p->response_latency),
51 memoryPacketSize(p->memory_packet_size),
52 readBufferSize(p->read_buffer_size),
53 writeBufferSize(p->write_buffer_size), port(name() + ".port", *this),
54 interface(p->interface),
55 retryRdReq(false), retryWrReq(false), nextRequest(0), nextReqEvent(this)
56 {
57 // Resize read and write queue to allocate space
58 // for configured QoS priorities
59 readQueue.resize(numPriorities());
60 writeQueue.resize(numPriorities());
61
62 interface->setMemCtrl(this);
63 }
64
// Nothing to release explicitly; all members clean up through their own
// destructors.
MemSinkCtrl::~MemSinkCtrl()
{}
67
68 void
69 MemSinkCtrl::init()
70 {
71 MemCtrl::init();
72
73 // Allow unconnected memories as this is used in several ruby
74 // systems at the moment
75 if (port.isConnected()) {
76 port.sendRangeChange();
77 }
78 }
79
80 bool
81 MemSinkCtrl::readQueueFull(const uint64_t packets) const
82 {
83 return (totalReadQueueSize + packets > readBufferSize);
84 }
85
86 bool
87 MemSinkCtrl::writeQueueFull(const uint64_t packets) const
88 {
89 return (totalWriteQueueSize + packets > writeBufferSize);
90 }
91
92 Tick
93 MemSinkCtrl::recvAtomic(PacketPtr pkt)
94 {
95 panic_if(pkt->cacheResponding(),
96 "%s Should not see packets where cache is responding\n",
97 __func__);
98
99 interface->access(pkt);
100 return responseLatency;
101 }
102
103 void
104 MemSinkCtrl::recvFunctional(PacketPtr pkt)
105 {
106 pkt->pushLabel(name());
107
108 interface->functionalAccess(pkt);
109
110 pkt->popLabel();
111 }
112
113 Port &
114 MemSinkCtrl::getPort(const std::string &interface, PortID idx)
115 {
116 if (interface != "port") {
117 return MemCtrl::getPort(interface, idx);
118 } else {
119 return port;
120 }
121 }
122
// Accept or reject a timing-mode request.
//
// The packet is assigned a QoS priority, then enqueued into the matching
// read/write priority queue if there is buffer space; otherwise the port
// is flagged for retry and the request is rejected.  Returns true when
// the request was accepted.
bool
MemSinkCtrl::recvTimingReq(PacketPtr pkt)
{
    // Request accepted
    bool req_accepted = true;

    // This sink only models plain reads and writes.
    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "%s. Should only see "
             "read and writes at memory controller\n",
             __func__);

    panic_if(pkt->cacheResponding(),
             "%s. Should not see packets where cache is responding\n",
             __func__);

    DPRINTF(QOS,
            "%s: REQUESTOR %s request %s addr %lld size %d\n",
            __func__,
            _system->getRequestorName(pkt->req->requestorId()),
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // Number of memory-packet-sized buffer entries this request occupies.
    uint64_t required_entries = divCeil(pkt->getSize(), memoryPacketSize);

    assert(required_entries);

    // Schedule packet
    // NOTE(review): the QoS priority is computed before the queue-full
    // check, so qosSchedule runs even for packets that end up rejected --
    // confirm this is intended for the chosen QoS policy.
    uint8_t pkt_priority = qosSchedule({&readQueue, &writeQueue},
                                       memoryPacketSize, pkt);

    if (pkt->isRead()) {
        if (readQueueFull(required_entries)) {
            DPRINTF(QOS,
                    "%s Read queue full, not accepting\n", __func__);
            // Remember that we have to retry this port
            retryRdReq = true;
            numReadRetries++;
            req_accepted = false;
        } else {
            // Enqueue the incoming packet into corresponding
            // QoS priority queue
            readQueue.at(pkt_priority).push_back(pkt);
            queuePolicy->enqueuePacket(pkt);
        }
    } else {
        if (writeQueueFull(required_entries)) {
            DPRINTF(QOS,
                    "%s Write queue full, not accepting\n", __func__);
            // Remember that we have to retry this port
            retryWrReq = true;
            numWriteRetries++;
            req_accepted = false;
        } else {
            // Enqueue the incoming packet into corresponding QoS
            // priority queue
            writeQueue.at(pkt_priority).push_back(pkt);
            queuePolicy->enqueuePacket(pkt);
        }
    }

    if (req_accepted) {
        // The packet is accepted - log it
        logRequest(pkt->isRead()? READ : WRITE,
                   pkt->req->requestorId(),
                   pkt->qosValue(),
                   pkt->getAddr(),
                   required_entries);
    }

    // Check if we have to process next request event
    // (never before nextRequest, which enforces the request service rate)
    if (!nextReqEvent.scheduled()) {
        DPRINTF(QOS,
                "%s scheduling next request at "
                "time %d (next is %d)\n", __func__,
                std::max(curTick(), nextRequest), nextRequest);
        schedule(nextReqEvent, std::max(curTick(), nextRequest));
    }
    return req_accepted;
}
201
// Service one queued packet.
//
// Picks the bus direction, scans the priority queues from highest to
// lowest, lets the queue policy choose a packet from the first non-empty
// queue, performs the access, schedules the response, and finally issues
// any pending port retry and re-schedules itself if work remains.
void
MemSinkCtrl::processNextReqEvent()
{
    PacketPtr pkt = nullptr;

    // Evaluate bus direction
    busStateNext = selectNextBusState();

    // Record turnaround stats and update current state direction
    recordTurnaroundStats();

    // Set current bus state
    setCurrentBusState();

    // Access current direction buffer
    std::vector<PacketQueue>* queue_ptr = (busState == READ ? &readQueue :
                                                              &writeQueue);

    DPRINTF(QOS,
            "%s DUMPING %s queues status\n", __func__,
            (busState == WRITE ? "WRITE" : "READ"));

    if (DTRACE(QOS)) {
        for (uint8_t i = 0; i < numPriorities(); ++i) {
            std::string plist = "";
            for (auto& e : (busState == WRITE ? writeQueue[i]: readQueue[i])) {
                plist += (std::to_string(e->req->requestorId())) + " ";
            }
            DPRINTF(QOS,
                    "%s priority Queue [%i] contains %i elements, "
                    "packets are: [%s]\n", __func__, i,
                    busState == WRITE ? writeQueueSizes[i] :
                                        readQueueSizes[i],
                    plist);
        }
    }

    uint8_t curr_prio = numPriorities();

    // Iterate in reverse so the highest priority queue is checked first.
    for (auto queue = (*queue_ptr).rbegin();
         queue != (*queue_ptr).rend(); ++queue) {

        curr_prio--;

        DPRINTF(QOS,
                "%s checking %s queue [%d] priority [%d packets]\n",
                __func__, (busState == READ? "READ" : "WRITE"),
                curr_prio, queue->size());

        if (!queue->empty()) {
            // Call the queue policy to select packet from priority queue
            auto p_it = queuePolicy->selectPacket(&(*queue));
            pkt = *p_it;
            queue->erase(p_it);

            DPRINTF(QOS,
                    "%s scheduling packet address %d for requestor %s from "
                    "priority queue %d\n", __func__, pkt->getAddr(),
                    _system->getRequestorName(pkt->req->requestorId()),
                    curr_prio);
            break;
        }
    }

    // This event is only scheduled when at least one queue has a packet
    // for the current direction, so a packet must have been found.
    assert(pkt);

    // Setup next request service time - do it here as retry request
    // hands over control to the port
    nextRequest = curTick() + requestLatency;

    uint64_t removed_entries = divCeil(pkt->getSize(), memoryPacketSize);

    DPRINTF(QOS,
            "%s scheduled packet address %d for requestor %s size is %d, "
            "corresponds to %d memory packets\n", __func__, pkt->getAddr(),
            _system->getRequestorName(pkt->req->requestorId()),
            pkt->getSize(), removed_entries);

    // Schedule response
    panic_if(!pkt->needsResponse(),
             "%s response not required\n", __func__);

    // Do the actual memory access which also turns the packet
    // into a response
    interface->access(pkt);

    // Log the response
    logResponse(pkt->isRead()? READ : WRITE,
                pkt->req->requestorId(),
                pkt->qosValue(),
                pkt->getAddr(),
                removed_entries, responseLatency);

    // Schedule the response
    port.schedTimingResp(pkt, curTick() + responseLatency);
    DPRINTF(QOS,
            "%s response scheduled at time %d\n",
            __func__, curTick() + responseLatency);

    // Finally - handle retry requests - this handles control
    // to the port, so do it last
    if (busState == READ && retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    } else if (busState == WRITE && retryWrReq) {
        retryWrReq = false;
        port.sendRetryReq();
    }

    // Check if we have to schedule another request event
    if ((totalReadQueueSize || totalWriteQueueSize) &&
        !nextReqEvent.scheduled()) {

        schedule(nextReqEvent, curTick() + requestLatency);
        DPRINTF(QOS,
                "%s scheduling next request event at tick %d\n",
                __func__, curTick() + requestLatency);
    }
}
321
322 DrainState
323 MemSinkCtrl::drain()
324 {
325 if (totalReadQueueSize || totalWriteQueueSize) {
326 DPRINTF(Drain,
327 "%s queues have requests, waiting to drain\n",
328 __func__);
329 return DrainState::Draining;
330 } else {
331 return DrainState::Drained;
332 }
333 }
334
335 void
336 MemSinkCtrl::regStats()
337 {
338 MemCtrl::regStats();
339
340 // Initialize all the stats
341 using namespace Stats;
342
343 numReadRetries.name(name() + ".numReadRetries")
344 .desc("Number of read retries");
345 numWriteRetries.name(name() + ".numWriteRetries")
346 .desc("Number of write retries");
347 }
348
// Response port constructor: n is the port name, m the owning controller.
// NOTE(review): `queue` is passed to the QueuedResponsePort base before
// `queue` itself is constructed; this is only safe if the base merely
// stores the reference during construction -- confirm in the port base
// class.
MemSinkCtrl::MemoryPort::MemoryPort(const std::string& n,
                                    MemSinkCtrl& m)
    : QueuedResponsePort(n, &m, queue, true),
      memory(m), queue(memory, *this, true)
{}
354
355 AddrRangeList
356 MemSinkCtrl::MemoryPort::getAddrRanges() const
357 {
358 AddrRangeList ranges;
359 ranges.push_back(memory.interface->getAddrRange());
360 return ranges;
361 }
362
363 Tick
364 MemSinkCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
365 {
366 return memory.recvAtomic(pkt);
367 }
368
369 void
370 MemSinkCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
371 {
372 pkt->pushLabel(memory.name());
373
374 if (!queue.trySatisfyFunctional(pkt)) {
375 // Default implementation of SimpleTimingPort::recvFunctional()
376 // calls recvAtomic() and throws away the latency; we can save a
377 // little here by just not calculating the latency.
378 memory.recvFunctional(pkt);
379 }
380
381 pkt->popLabel();
382 }
383
384 bool
385 MemSinkCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
386 {
387 return memory.recvTimingReq(pkt);
388 }
389
390 } // namespace QoS
391
392 QoS::MemSinkCtrl*
393 QoSMemSinkCtrlParams::create()
394 {
395 return new QoS::MemSinkCtrl(this);
396 }
397
398 QoSMemSinkInterface::QoSMemSinkInterface(const QoSMemSinkInterfaceParams* _p)
399 : AbstractMemory(_p)
400 {
401 }
402
403 QoSMemSinkInterface*
404 QoSMemSinkInterfaceParams::create()
405 {
406 return new QoSMemSinkInterface(this);
407 }