/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "debug/Config.hh"
32 #include "debug/Drain.hh"
33 #include "debug/RubyDma.hh"
34 #include "debug/RubyStats.hh"
35 #include "mem/protocol/SequencerMsg.hh"
36 #include "mem/ruby/system/DMASequencer.hh"
37 #include "mem/ruby/system/RubySystem.hh"
38 #include "sim/system.hh"
40 DMASequencer::DMASequencer(const Params
*p
)
41 : MemObject(p
), m_ruby_system(p
->ruby_system
), m_version(p
->version
),
42 m_controller(NULL
), m_mandatory_q_ptr(NULL
),
43 m_usingRubyTester(p
->using_ruby_tester
),
44 slave_port(csprintf("%s.slave", name()), this, 0, p
->ruby_system
,
45 p
->ruby_system
->getAccessBackingStore()),
46 system(p
->system
), retry(false)
48 assert(m_version
!= -1);
55 assert(m_controller
!= NULL
);
56 m_mandatory_q_ptr
= m_controller
->getMandatoryQueue();
58 m_data_block_mask
= ~ (~0 << RubySystem::getBlockSizeBits());
60 slave_port
.sendRangeChange();
64 DMASequencer::getSlavePort(const std::string
&if_name
, PortID idx
)
66 // used by the CPUs to connect the caches to the interconnect, and
67 // for the x86 case also the interrupt master
68 if (if_name
!= "slave") {
69 // pass it along to our super class
70 return MemObject::getSlavePort(if_name
, idx
);
76 DMASequencer::MemSlavePort::MemSlavePort(const std::string
&_name
,
77 DMASequencer
*_port
, PortID id
, RubySystem
* _ruby_system
,
78 bool _access_backing_store
)
79 : QueuedSlavePort(_name
, _port
, queue
, id
), queue(*_port
, *this),
80 m_ruby_system(_ruby_system
), access_backing_store(_access_backing_store
)
82 DPRINTF(RubyDma
, "Created slave memport on ruby sequencer %s\n", _name
);
86 DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt
)
88 DPRINTF(RubyDma
, "Timing request for address %#x on port %d\n",
90 DMASequencer
*seq
= static_cast<DMASequencer
*>(&owner
);
92 if (pkt
->memInhibitAsserted())
93 panic("DMASequencer should never see an inhibited request\n");
95 assert(isPhysMemAddress(pkt
->getAddr()));
96 assert(getOffset(pkt
->getAddr()) + pkt
->getSize() <=
97 RubySystem::getBlockSizeBytes());
99 // Submit the ruby request
100 RequestStatus requestStatus
= seq
->makeRequest(pkt
);
102 // If the request successfully issued then we should return true.
103 // Otherwise, we need to tell the port to retry at a later point
105 if (requestStatus
== RequestStatus_Issued
) {
106 DPRINTF(RubyDma
, "Request %s 0x%x issued\n", pkt
->cmdString(),
111 // Unless one is using the ruby tester, record the stalled M5 port for
112 // later retry when the sequencer becomes free.
113 if (!seq
->m_usingRubyTester
) {
117 DPRINTF(RubyDma
, "Request for address %#x did not issued because %s\n",
118 pkt
->getAddr(), RequestStatus_to_string(requestStatus
));
124 DMASequencer::ruby_hit_callback(PacketPtr pkt
)
126 DPRINTF(RubyDma
, "Hit callback for %s 0x%x\n", pkt
->cmdString(),
129 // The packet was destined for memory and has not yet been turned
131 assert(system
->isMemAddr(pkt
->getAddr()));
132 assert(pkt
->isRequest());
133 slave_port
.hitCallback(pkt
);
135 // If we had to stall the slave ports, wake it up because
136 // the sequencer likely has free resources now.
139 DPRINTF(RubyDma
,"Sequencer may now be free. SendRetry to port %s\n",
141 slave_port
.sendRetryReq();
148 DMASequencer::testDrainComplete()
150 //If we weren't able to drain before, we might be able to now.
151 if (drainState() == DrainState::Draining
) {
152 unsigned int drainCount
= outstandingCount();
153 DPRINTF(Drain
, "Drain count: %u\n", drainCount
);
154 if (drainCount
== 0) {
155 DPRINTF(Drain
, "DMASequencer done draining, signaling drain done\n");
162 DMASequencer::drain()
164 if (isDeadlockEventScheduled()) {
165 descheduleDeadlockEvent();
168 // If the DMASequencer is not empty, then it needs to clear all outstanding
169 // requests before it should call signalDrainDone()
170 DPRINTF(Config
, "outstanding count %d\n", outstandingCount());
173 if (outstandingCount() > 0) {
174 DPRINTF(Drain
, "DMASequencer not drained\n");
175 return DrainState::Draining
;
177 return DrainState::Drained
;
182 DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt
)
184 bool needsResponse
= pkt
->needsResponse();
185 assert(!pkt
->isLLSC());
186 assert(!pkt
->isFlush());
188 DPRINTF(RubyDma
, "Hit callback needs response %d\n", needsResponse
);
190 // turn packet around to go back to requester if response expected
192 if (access_backing_store
) {
193 m_ruby_system
->getPhysMem()->access(pkt
);
194 } else if (needsResponse
) {
199 DPRINTF(RubyDma
, "Sending packet back over port\n");
201 DMASequencer
*seq
= static_cast<DMASequencer
*>(&owner
);
202 RubySystem
*rs
= seq
->m_ruby_system
;
203 schedTimingResp(pkt
, curTick() + rs
->clockPeriod());
208 DPRINTF(RubyDma
, "Hit callback done!\n");
212 DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr
) const
214 DMASequencer
*seq
= static_cast<DMASequencer
*>(&owner
);
215 return seq
->system
->isMemAddr(addr
);
219 DMASequencer::makeRequest(PacketPtr pkt
)
222 return RequestStatus_BufferFull
;
225 Addr paddr
= pkt
->getAddr();
226 uint8_t* data
= pkt
->getPtr
<uint8_t>();
227 int len
= pkt
->getSize();
228 bool write
= pkt
->isWrite();
230 assert(!m_is_busy
); // only support one outstanding DMA request
233 active_request
.start_paddr
= paddr
;
234 active_request
.write
= write
;
235 active_request
.data
= data
;
236 active_request
.len
= len
;
237 active_request
.bytes_completed
= 0;
238 active_request
.bytes_issued
= 0;
239 active_request
.pkt
= pkt
;
241 std::shared_ptr
<SequencerMsg
> msg
=
242 std::make_shared
<SequencerMsg
>(clockEdge());
243 msg
->getPhysicalAddress() = paddr
;
244 msg
->getLineAddress() = makeLineAddress(msg
->getPhysicalAddress());
245 msg
->getType() = write
? SequencerRequestType_ST
: SequencerRequestType_LD
;
246 int offset
= paddr
& m_data_block_mask
;
248 msg
->getLen() = (offset
+ len
) <= RubySystem::getBlockSizeBytes() ?
249 len
: RubySystem::getBlockSizeBytes() - offset
;
251 if (write
&& (data
!= NULL
)) {
252 if (active_request
.data
!= NULL
) {
253 msg
->getDataBlk().setData(data
, offset
, msg
->getLen());
257 assert(m_mandatory_q_ptr
!= NULL
);
258 m_mandatory_q_ptr
->enqueue(msg
, clockEdge(), cyclesToTicks(Cycles(1)));
259 active_request
.bytes_issued
+= msg
->getLen();
261 return RequestStatus_Issued
;
265 DMASequencer::issueNext()
268 active_request
.bytes_completed
= active_request
.bytes_issued
;
269 if (active_request
.len
== active_request
.bytes_completed
) {
271 // Must unset the busy flag before calling back the dma port because
272 // the callback may cause a previously nacked request to be reissued
274 DPRINTF(RubyDma
, "DMA request completed\n");
276 ruby_hit_callback(active_request
.pkt
);
280 std::shared_ptr
<SequencerMsg
> msg
=
281 std::make_shared
<SequencerMsg
>(clockEdge());
282 msg
->getPhysicalAddress() = active_request
.start_paddr
+
283 active_request
.bytes_completed
;
285 assert((msg
->getPhysicalAddress() & m_data_block_mask
) == 0);
286 msg
->getLineAddress() = makeLineAddress(msg
->getPhysicalAddress());
288 msg
->getType() = (active_request
.write
? SequencerRequestType_ST
:
289 SequencerRequestType_LD
);
292 (active_request
.len
-
293 active_request
.bytes_completed
< RubySystem::getBlockSizeBytes() ?
294 active_request
.len
- active_request
.bytes_completed
:
295 RubySystem::getBlockSizeBytes());
297 if (active_request
.write
) {
299 setData(&active_request
.data
[active_request
.bytes_completed
],
303 assert(m_mandatory_q_ptr
!= NULL
);
304 m_mandatory_q_ptr
->enqueue(msg
, clockEdge(), cyclesToTicks(Cycles(1)));
305 active_request
.bytes_issued
+= msg
->getLen();
307 "DMA request bytes issued %d, bytes completed %d, total len %d\n",
308 active_request
.bytes_issued
, active_request
.bytes_completed
,
313 DMASequencer::dataCallback(const DataBlock
& dblk
)
316 int len
= active_request
.bytes_issued
- active_request
.bytes_completed
;
318 if (active_request
.bytes_completed
== 0)
319 offset
= active_request
.start_paddr
& m_data_block_mask
;
320 assert(!active_request
.write
);
321 if (active_request
.data
!= NULL
) {
322 memcpy(&active_request
.data
[active_request
.bytes_completed
],
323 dblk
.getData(offset
, len
), len
);
329 DMASequencer::ackCallback()
335 DMASequencer::recordRequestType(DMASequencerRequestType requestType
)
337 DPRINTF(RubyStats
, "Recorded statistic: %s\n",
338 DMASequencerRequestType_to_string(requestType
));
342 DMASequencerParams::create()
344 return new DMASequencer(this);