arch, cpu, dev, gpu, mem, sim, python: start using getPort.
[gem5.git] / src / mem / qos / mem_sink.cc
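The change titled above moves the tree to a single name-based getPort() lookup in place of the earlier per-direction port accessors; the MemSinkCtrl::getPort() override in this file is one instance of that pattern. A minimal sketch of how a caller resolves this controller's slave port under the new interface (the mem_ctrl variable is hypothetical):

    // Hypothetical caller: look the slave port up by its port name.
    // Any name other than "port" falls through to the base-class lookup.
    Port &resp_port = mem_ctrl.getPort("port");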
/*
 * Copyright (c) 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Matteo Andreozzi
 */

#include "debug/Drain.hh"
#include "debug/QOS.hh"
#include "mem_sink.hh"
#include "sim/system.hh"

namespace QoS {

MemSinkCtrl::MemSinkCtrl(const QoSMemSinkCtrlParams* p)
    : MemCtrl(p), requestLatency(p->request_latency),
      responseLatency(p->response_latency),
      memoryPacketSize(p->memory_packet_size),
      readBufferSize(p->read_buffer_size),
      writeBufferSize(p->write_buffer_size), port(name() + ".port", *this),
      retryRdReq(false), retryWrReq(false), nextRequest(0), nextReqEvent(this)
{
    // Resize read and write queue to allocate space
    // for configured QoS priorities
    readQueue.resize(numPriorities());
    writeQueue.resize(numPriorities());
}

MemSinkCtrl::~MemSinkCtrl()
{}

void
MemSinkCtrl::init()
{
    MemCtrl::init();

    // Allow unconnected memories as this is used in several ruby
    // systems at the moment
    if (port.isConnected()) {
        port.sendRangeChange();
    }
}

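// Buffer occupancy helpers: a request needing `packets` memory-packet slots
// is refused if it would push the corresponding queue past its configured
// read or write buffer size.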
bool
MemSinkCtrl::readQueueFull(const uint64_t packets) const
{
    return (totalReadQueueSize + packets > readBufferSize);
}

bool
MemSinkCtrl::writeQueueFull(const uint64_t packets) const
{
    return (totalWriteQueueSize + packets > writeBufferSize);
}

Tick
MemSinkCtrl::recvAtomic(PacketPtr pkt)
{
    panic_if(pkt->cacheResponding(),
             "%s Should not see packets where cache is responding\n",
             __func__);

    access(pkt);
    return responseLatency;
}

void
MemSinkCtrl::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(name());

    functionalAccess(pkt);

    pkt->popLabel();
}

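// Unified port lookup (the getPort() interface this change moves to): the
// controller exposes a single slave port named "port"; any other interface
// name is deferred to the MemObject base class.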
Port &
MemSinkCtrl::getPort(const std::string &interface, PortID idx)
{
    if (interface != "port") {
        return MemObject::getPort(interface, idx);
    } else {
        return port;
    }
}

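// Timing request path: the packet is first assigned a QoS priority by the
// scheduler (qosSchedule), then either enqueued in the matching per-priority
// read/write queue or refused, with a retry flagged, if that queue is full.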
bool
MemSinkCtrl::recvTimingReq(PacketPtr pkt)
{
    // Request accepted
    bool req_accepted = true;

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "%s. Should only see "
             "reads and writes at memory controller\n",
             __func__);

    panic_if(pkt->cacheResponding(),
             "%s. Should not see packets where cache is responding\n",
             __func__);

    DPRINTF(QOS,
            "%s: MASTER %s request %s addr %lld size %d\n",
            __func__,
            _system->getMasterName(pkt->req->masterId()),
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    uint64_t required_entries = divCeil(pkt->getSize(), memoryPacketSize);

    assert(required_entries);

    // Schedule packet
    uint8_t pkt_priority = qosSchedule({&readQueue, &writeQueue},
                                       memoryPacketSize, pkt);

    if (pkt->isRead()) {
        if (readQueueFull(required_entries)) {
            DPRINTF(QOS,
                    "%s Read queue full, not accepting\n", __func__);
            // Remember that we have to retry this port
            retryRdReq = true;
            numReadRetries++;
            req_accepted = false;
        } else {
            // Enqueue the incoming packet into corresponding
            // QoS priority queue
            readQueue.at(pkt_priority).push_back(pkt);
            queuePolicy->enqueuePacket(pkt);
        }
    } else {
        if (writeQueueFull(required_entries)) {
            DPRINTF(QOS,
                    "%s Write queue full, not accepting\n", __func__);
            // Remember that we have to retry this port
            retryWrReq = true;
            numWriteRetries++;
            req_accepted = false;
        } else {
            // Enqueue the incoming packet into corresponding QoS
            // priority queue
            writeQueue.at(pkt_priority).push_back(pkt);
            queuePolicy->enqueuePacket(pkt);
        }
    }

    if (req_accepted) {
        // The packet is accepted - log it
        logRequest(pkt->isRead()? READ : WRITE,
                   pkt->req->masterId(),
                   pkt->qosValue(),
                   pkt->getAddr(),
                   required_entries);
    }

    // Check if we have to process next request event
    if (!nextReqEvent.scheduled()) {
        DPRINTF(QOS,
                "%s scheduling next request at "
                "time %d (next is %d)\n", __func__,
                std::max(curTick(), nextRequest), nextRequest);
        schedule(nextReqEvent, std::max(curTick(), nextRequest));
    }
    return req_accepted;
}

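// Main service loop: pick the bus direction, walk the priority queues from
// highest to lowest, let the queue policy choose a packet, perform the
// access, schedule the response on the port, and release any pending retry.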
void
MemSinkCtrl::processNextReqEvent()
{
    PacketPtr pkt = nullptr;

    // Evaluate bus direction
    busStateNext = selectNextBusState();

    // Record turnaround stats and update current state direction
    recordTurnaroundStats();

    // Set current bus state
    setCurrentBusState();

    // Access current direction buffer
    std::vector<PacketQueue>* queue_ptr = (busState == READ ? &readQueue :
                                                              &writeQueue);

    DPRINTF(QOS,
            "%s DUMPING %s queues status\n", __func__,
            (busState == WRITE ? "WRITE" : "READ"));

    if (DTRACE(QOS)) {
        for (uint8_t i = 0; i < numPriorities(); ++i) {
            std::string plist = "";
            for (auto& e : (busState == WRITE ? writeQueue[i]: readQueue[i])) {
                plist += (std::to_string(e->req->masterId())) + " ";
            }
            DPRINTF(QOS,
                    "%s priority Queue [%i] contains %i elements, "
                    "packets are: [%s]\n", __func__, i,
                    busState == WRITE ? writeQueueSizes[i] :
                                        readQueueSizes[i],
                    plist);
        }
    }

    uint8_t curr_prio = numPriorities();

    for (auto queue = (*queue_ptr).rbegin();
         queue != (*queue_ptr).rend(); ++queue) {

        curr_prio--;

        DPRINTF(QOS,
                "%s checking %s queue [%d] priority [%d packets]\n",
                __func__, (busState == READ? "READ" : "WRITE"),
                curr_prio, queue->size());

        if (!queue->empty()) {
            // Call the queue policy to select packet from priority queue
            auto p_it = queuePolicy->selectPacket(&(*queue));
            pkt = *p_it;
            queue->erase(p_it);

            DPRINTF(QOS,
                    "%s scheduling packet address %d for master %s from "
                    "priority queue %d\n", __func__, pkt->getAddr(),
                    _system->getMasterName(pkt->req->masterId()),
                    curr_prio);
            break;
        }
    }

    assert(pkt);

    // Setup next request service time - do it here as retry request
    // hands over control to the port
    nextRequest = curTick() + requestLatency;

    uint64_t removed_entries = divCeil(pkt->getSize(), memoryPacketSize);

    DPRINTF(QOS,
            "%s scheduled packet address %d for master %s size is %d, "
            "corresponds to %d memory packets\n", __func__, pkt->getAddr(),
            _system->getMasterName(pkt->req->masterId()),
            pkt->getSize(), removed_entries);

    // Schedule response
    panic_if(!pkt->needsResponse(),
             "%s response not required\n", __func__);

    // Do the actual memory access which also turns the packet
    // into a response
    access(pkt);

    // Log the response
    logResponse(pkt->isRead()? READ : WRITE,
                pkt->req->masterId(),
                pkt->qosValue(),
                pkt->getAddr(),
                removed_entries, responseLatency);

    // Schedule the response
    port.schedTimingResp(pkt, curTick() + responseLatency);
    DPRINTF(QOS,
            "%s response scheduled at time %d\n",
            __func__, curTick() + responseLatency);

    // Finally - handle retry requests - this hands control
    // over to the port, so do it last
    if (busState == READ && retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    } else if (busState == WRITE && retryWrReq) {
        retryWrReq = false;
        port.sendRetryReq();
    }

    // Check if we have to schedule another request event
    if ((totalReadQueueSize || totalWriteQueueSize) &&
        !nextReqEvent.scheduled()) {

        schedule(nextReqEvent, curTick() + requestLatency);
        DPRINTF(QOS,
                "%s scheduling next request event at tick %d\n",
                __func__, curTick() + requestLatency);
    }
}

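// Draining: report Draining while either queue still holds requests and
// Drained once both are empty.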
DrainState
MemSinkCtrl::drain()
{
    if (totalReadQueueSize || totalWriteQueueSize) {
        DPRINTF(Drain,
                "%s queues have requests, waiting to drain\n",
                __func__);
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
MemSinkCtrl::regStats()
{
    MemCtrl::regStats();

    // Initialize all the stats
    using namespace Stats;

    numReadRetries.name(name() + ".numReadRetries")
        .desc("Number of read retries");
    numWriteRetries.name(name() + ".numWriteRetries")
        .desc("Number of write retries");
}

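// MemoryPort is a queued slave port that forwards atomic, functional and
// timing requests to the owning MemSinkCtrl and buffers its responses.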
MemSinkCtrl::MemoryPort::MemoryPort(const std::string& n,
                                    MemSinkCtrl& m)
    : QueuedSlavePort(n, &m, queue, true), memory(m), queue(memory, *this, true)
{}

AddrRangeList
MemSinkCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

Tick
MemSinkCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

void
MemSinkCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

bool
MemSinkCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    return memory.recvTimingReq(pkt);
}

} // namespace QoS

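// Standard gem5 params factory: the generated QoSMemSinkCtrlParams object
// uses this hook to build the controller instance from the Python
// configuration.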
QoS::MemSinkCtrl*
QoSMemSinkCtrlParams::create()
{
    return new QoS::MemSinkCtrl(this);
}