src/mem/ruby/system/DMASequencer.cc
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cstring>
#include <memory>

#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

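// The DMASequencer connects a single DMA device to the Ruby memory system.
// It accepts one request at a time and issues it to the cache controller
// in cache-block-sized chunks, completing the packet back to the slave
// port once every chunk has been acknowledged.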
DMASequencer::DMASequencer(const Params *p)
    : MemObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p->using_ruby_tester),
      slave_port(csprintf("%s.slave", name()), this, 0, p->ruby_system,
                 p->ruby_system->getAccessBackingStore()),
      system(p->system), retry(false)
{
    assert(m_version != -1);
}

void
DMASequencer::init()
{
    MemObject::init();
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_is_busy = false;
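    // Mask of the byte-offset bits within a cache block (e.g. 0x3f for
    // 64-byte blocks); used to split DMA requests on block boundaries.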
    m_data_block_mask = ~(~0 << RubySystem::getBlockSizeBits());

    slave_port.sendRangeChange();
}

BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // used by DMA devices to connect to the Ruby memory system
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
    DMASequencer *_port, PortID id, RubySystem *_ruby_system,
    bool _access_backing_store)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      m_ruby_system(_ruby_system), access_backing_store(_access_backing_store)
{
    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("DMASequencer should never see an inhibited request\n");

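    // Each packet must fit within a single cache block; callers are
    // expected to split larger transfers before they reach the sequencer.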
    assert(isPhysMemAddress(pkt->getAddr()));
    assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because the
    // sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetryReq();
    }

    testDrainComplete();
}

void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
            signalDrainDone();
        }
    }
}

DrainState
DMASequencer::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, it must drain all outstanding
    // requests before it can signal drain done.
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());

    // Set status
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "DMASequencer not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

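// Complete a request at the slave port. When Ruby's backing store is in
// use, the functional access both moves the data and turns the packet
// into a response; otherwise the packet is converted explicitly.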
void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    // turn packet around to go back to requester if response expected
    if (access_backing_store) {
        m_ruby_system->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    if (needsResponse) {
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        DMASequencer *seq = static_cast<DMASequencer *>(&owner);
        RubySystem *rs = seq->m_ruby_system;
        schedTimingResp(pkt, curTick() + rs->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}

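// Accept a new DMA request and issue its first cache-block-sized chunk to
// the controller's mandatory queue; issueNext() issues any remaining
// chunks. The first chunk runs from the start address to the end of its
// cache block (or covers the whole request if it is shorter). For example,
// with 64-byte blocks, a 16-byte request starting at block offset 0x38
// would issue an 8-byte chunk here and the remaining 8 bytes from
// issueNext(), although recvTimingReq() currently restricts incoming
// packets to a single block.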
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    Addr paddr = pkt->getAddr();
    uint8_t *data = pkt->getPtr<uint8_t>();
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy);  // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = paddr;
    msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
    int offset = paddr & m_data_block_mask;

    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    // active_request.data aliases data, so a single NULL check suffices
    if (write && (data != NULL)) {
        msg->getDataBlk().setData(data, offset, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}

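// Issue the next chunk of the active request, or, once all bytes have been
// issued and completed, clear the busy flag and hand the packet back to
// the slave port.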
void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        //
        // Must unset the busy flag before calling back the dma port because
        // the callback may cause a previously nacked request to be reissued
        //
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = active_request.start_paddr +
        active_request.bytes_completed;

    assert((msg->getPhysicalAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                      SequencerRequestType_LD);

    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

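// Called when read data returns from the controller: copy the chunk into
// the request's data buffer. Only the first chunk can start at a nonzero
// offset within its cache block; later chunks are block aligned.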
void
DMASequencer::dataCallback(const DataBlock &dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

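// Called when a write chunk has been acknowledged; move on to the next one.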
void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}