mem: Add Units to mem stats
[gem5.git] / src/mem/qos/mem_sink.cc
/*
 * Copyright (c) 2018-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Matteo Andreozzi
 */

#include "mem_sink.hh"

#include "debug/Drain.hh"
#include "debug/QOS.hh"
#include "params/QoSMemSinkInterface.hh"
#include "sim/system.hh"

namespace QoS {

MemSinkCtrl::MemSinkCtrl(const QoSMemSinkCtrlParams &p)
    : MemCtrl(p), requestLatency(p.request_latency),
      responseLatency(p.response_latency),
      memoryPacketSize(p.memory_packet_size),
      readBufferSize(p.read_buffer_size),
      writeBufferSize(p.write_buffer_size), port(name() + ".port", *this),
      interface(p.interface),
      retryRdReq(false), retryWrReq(false), nextRequest(0), nextReqEvent(this),
      stats(this)
{
    // Resize the read and write queues to allocate space
    // for the configured QoS priorities
    readQueue.resize(numPriorities());
    writeQueue.resize(numPriorities());

    interface->setMemCtrl(this);
}

MemSinkCtrl::~MemSinkCtrl()
{}

void
MemSinkCtrl::init()
{
    MemCtrl::init();

    // Allow unconnected memories as this is used in several ruby
    // systems at the moment
    if (port.isConnected()) {
        port.sendRangeChange();
    }
}

bool
MemSinkCtrl::readQueueFull(const uint64_t packets) const
{
    return (totalReadQueueSize + packets > readBufferSize);
}

bool
MemSinkCtrl::writeQueueFull(const uint64_t packets) const
{
    return (totalWriteQueueSize + packets > writeBufferSize);
}

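// Atomic requests are serviced immediately through the memory
// interface; the fixed responseLatency is returned as an estimate of
// the access time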
Tick
MemSinkCtrl::recvAtomic(PacketPtr pkt)
{
    panic_if(pkt->cacheResponding(),
             "%s Should not see packets where cache is responding\n",
             __func__);

    interface->access(pkt);
    return responseLatency;
}

void
MemSinkCtrl::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(name());

    interface->functionalAccess(pkt);

    pkt->popLabel();
}

Port &
MemSinkCtrl::getPort(const std::string &interface, PortID idx)
{
    if (interface != "port") {
        return MemCtrl::getPort(interface, idx);
    } else {
        return port;
    }
}

bool
MemSinkCtrl::recvTimingReq(PacketPtr pkt)
{
    // Request accepted
    bool req_accepted = true;

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "%s. Should only see "
             "reads and writes at memory controller\n",
             __func__);

    panic_if(pkt->cacheResponding(),
             "%s. Should not see packets where cache is responding\n",
             __func__);

    DPRINTF(QOS,
            "%s: REQUESTOR %s request %s addr %lld size %d\n",
            __func__,
            _system->getRequestorName(pkt->req->requestorId()),
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    uint64_t required_entries = divCeil(pkt->getSize(), memoryPacketSize);

    assert(required_entries);

    // Schedule packet
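    // (qosSchedule asks the configured QoS policy to stamp the packet
    // with a priority in [0, numPriorities()) and updates the
    // per-priority accounting for the passed queues)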
    uint8_t pkt_priority = qosSchedule({&readQueue, &writeQueue},
                                       memoryPacketSize, pkt);

    if (pkt->isRead()) {
        if (readQueueFull(required_entries)) {
            DPRINTF(QOS,
                    "%s Read queue full, not accepting\n", __func__);
            // Remember that we have to retry this port
            retryRdReq = true;
            stats.numReadRetries++;
            req_accepted = false;
        } else {
            // Enqueue the incoming packet into the corresponding
            // QoS priority queue
            readQueue.at(pkt_priority).push_back(pkt);
            queuePolicy->enqueuePacket(pkt);
        }
    } else {
        if (writeQueueFull(required_entries)) {
            DPRINTF(QOS,
                    "%s Write queue full, not accepting\n", __func__);
            // Remember that we have to retry this port
            retryWrReq = true;
            stats.numWriteRetries++;
            req_accepted = false;
        } else {
            // Enqueue the incoming packet into the corresponding QoS
            // priority queue
            writeQueue.at(pkt_priority).push_back(pkt);
            queuePolicy->enqueuePacket(pkt);
        }
    }

    if (req_accepted) {
        // The packet is accepted - log it
        logRequest(pkt->isRead() ? READ : WRITE,
                   pkt->req->requestorId(),
                   pkt->qosValue(),
                   pkt->getAddr(),
                   required_entries);
    }

    // Check if we have to process next request event
    if (!nextReqEvent.scheduled()) {
        DPRINTF(QOS,
                "%s scheduling next request at "
                "time %d (next is %d)\n", __func__,
                std::max(curTick(), nextRequest), nextRequest);
        schedule(nextReqEvent, std::max(curTick(), nextRequest));
    }
    return req_accepted;
}

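// Service one packet per event: pick the bus direction, then scan the
// priority queues from highest to lowest and let the queue policy
// select a packet within the chosen priority level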
void
MemSinkCtrl::processNextReqEvent()
{
    PacketPtr pkt = nullptr;

    // Evaluate bus direction
    busStateNext = selectNextBusState();

    // Record turnaround stats and update current state direction
    recordTurnaroundStats();

    // Set current bus state
    setCurrentBusState();

    // Access current direction buffer
    std::vector<PacketQueue>* queue_ptr = (busState == READ ? &readQueue :
                                                              &writeQueue);

    DPRINTF(QOS,
            "%s DUMPING %s queues status\n", __func__,
            (busState == WRITE ? "WRITE" : "READ"));

    if (DTRACE(QOS)) {
        for (uint8_t i = 0; i < numPriorities(); ++i) {
            std::string plist = "";
            for (auto& e : (busState == WRITE ? writeQueue[i] : readQueue[i])) {
                plist += (std::to_string(e->req->requestorId())) + " ";
            }
            DPRINTF(QOS,
                    "%s priority Queue [%i] contains %i elements, "
                    "packets are: [%s]\n", __func__, i,
                    busState == WRITE ? writeQueueSizes[i] :
                                        readQueueSizes[i],
                    plist);
        }
    }

    uint8_t curr_prio = numPriorities();

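    // Walk the queues from highest priority (the back of the vector)
    // down to lowest; the first non-empty queue is served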
    for (auto queue = (*queue_ptr).rbegin();
         queue != (*queue_ptr).rend(); ++queue) {

        curr_prio--;

        DPRINTF(QOS,
                "%s checking %s queue [%d] priority [%d packets]\n",
                __func__, (busState == READ ? "READ" : "WRITE"),
                curr_prio, queue->size());

        if (!queue->empty()) {
            // Call the queue policy to select packet from priority queue
            auto p_it = queuePolicy->selectPacket(&(*queue));
            pkt = *p_it;
            queue->erase(p_it);

            DPRINTF(QOS,
                    "%s scheduling packet address %d for requestor %s from "
                    "priority queue %d\n", __func__, pkt->getAddr(),
                    _system->getRequestorName(pkt->req->requestorId()),
                    curr_prio);
            break;
        }
    }

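    // The request event is only ever scheduled while packets are
    // queued, so the scan above must have found one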
    assert(pkt);

    // Setup next request service time - do it here as a retry request
    // hands over control to the port
    nextRequest = curTick() + requestLatency;

    uint64_t removed_entries = divCeil(pkt->getSize(), memoryPacketSize);

    DPRINTF(QOS,
            "%s scheduled packet address %d for requestor %s size is %d, "
            "corresponds to %d memory packets\n", __func__, pkt->getAddr(),
            _system->getRequestorName(pkt->req->requestorId()),
            pkt->getSize(), removed_entries);

    // Check that a response is required
    panic_if(!pkt->needsResponse(),
             "%s response not required\n", __func__);

    // Do the actual memory access which also turns the packet
    // into a response
    interface->access(pkt);

    // Log the response
    logResponse(pkt->isRead() ? READ : WRITE,
                pkt->req->requestorId(),
                pkt->qosValue(),
                pkt->getAddr(),
                removed_entries, responseLatency);

    // Schedule the response
    port.schedTimingResp(pkt, curTick() + responseLatency);
    DPRINTF(QOS,
            "%s response scheduled at time %d\n",
            __func__, curTick() + responseLatency);

    // Finally - handle retry requests - this hands control over
    // to the port, so do it last
    if (busState == READ && retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    } else if (busState == WRITE && retryWrReq) {
        retryWrReq = false;
        port.sendRetryReq();
    }

    // Check if we have to schedule another request event
    if ((totalReadQueueSize || totalWriteQueueSize) &&
        !nextReqEvent.scheduled()) {

        schedule(nextReqEvent, curTick() + requestLatency);
        DPRINTF(QOS,
                "%s scheduling next request event at tick %d\n",
                __func__, curTick() + requestLatency);
    }
}

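// The controller reports Draining while packets remain queued; the
// pending nextReqEvent keeps servicing them until both queues empty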
DrainState
MemSinkCtrl::drain()
{
    if (totalReadQueueSize || totalWriteQueueSize) {
        DPRINTF(Drain,
                "%s queues have requests, waiting to drain\n",
                __func__);
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

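// Stats are declared with explicit units (the point of this change):
// UNIT_COUNT tags both retry counters as dimensionless event counts in
// the stats Units framework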
MemSinkCtrl::MemSinkCtrlStats::MemSinkCtrlStats(Stats::Group *parent)
    : Stats::Group(parent),
      ADD_STAT(numReadRetries, UNIT_COUNT, "Number of read retries"),
      ADD_STAT(numWriteRetries, UNIT_COUNT, "Number of write retries")
{
}

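// The port pairs with a response packet queue, so responses handed to
// schedTimingResp() are buffered and sent when the port becomes free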
MemSinkCtrl::MemoryPort::MemoryPort(const std::string& n,
                                    MemSinkCtrl& m)
    : QueuedResponsePort(n, &m, queue, true),
      memory(m), queue(memory, *this, true)
{}

AddrRangeList
MemSinkCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.interface->getAddrRange());
    return ranges;
}

Tick
MemSinkCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

void
MemSinkCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

bool
MemSinkCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    return memory.recvTimingReq(pkt);
}

} // namespace QoS

QoSMemSinkInterface::QoSMemSinkInterface(const QoSMemSinkInterfaceParams &_p)
    : AbstractMemory(_p)
{
}