/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
29 #include "mem/ruby/slicc_interface/AbstractController.hh"
31 #include "debug/RubyQueue.hh"
32 #include "mem/protocol/MemoryMsg.hh"
33 #include "mem/ruby/system/RubySystem.hh"
34 #include "mem/ruby/system/Sequencer.hh"
35 #include "sim/system.hh"
37 AbstractController::AbstractController(const Params
*p
)
38 : MemObject(p
), Consumer(this), m_version(p
->version
),
39 m_clusterID(p
->cluster_id
),
40 m_masterId(p
->system
->getMasterId(name())), m_is_blocking(false),
41 m_number_of_TBEs(p
->number_of_TBEs
),
42 m_transitions_per_cycle(p
->transitions_per_cycle
),
43 m_buffer_size(p
->buffer_size
), m_recycle_latency(p
->recycle_latency
),
44 memoryPort(csprintf("%s.memory", name()), this, "")
47 // Combine the statistics from all controllers
48 // of this particular type.
49 Stats::registerDumpCallback(new StatsCallback(this));
54 AbstractController::init()
56 params()->ruby_system
->registerAbstractController(this);
57 m_delayHistogram
.init(10);
58 uint32_t size
= Network::getNumberOfVirtualNetworks();
59 for (uint32_t i
= 0; i
< size
; i
++) {
60 m_delayVCHistogram
.push_back(new Stats::Histogram());
61 m_delayVCHistogram
[i
]->init(10);
66 AbstractController::resetStats()
68 m_delayHistogram
.reset();
69 uint32_t size
= Network::getNumberOfVirtualNetworks();
70 for (uint32_t i
= 0; i
< size
; i
++) {
71 m_delayVCHistogram
[i
]->reset();
76 AbstractController::regStats()
79 .name(name() + ".fully_busy_cycles")
80 .desc("cycles for which number of transistions == max transitions")
81 .flags(Stats::nozero
);
85 AbstractController::profileMsgDelay(uint32_t virtualNetwork
, Cycles delay
)
87 assert(virtualNetwork
< m_delayVCHistogram
.size());
88 m_delayHistogram
.sample(delay
);
89 m_delayVCHistogram
[virtualNetwork
]->sample(delay
);
93 AbstractController::stallBuffer(MessageBuffer
* buf
, Addr addr
)
95 if (m_waiting_buffers
.count(addr
) == 0) {
96 MsgVecType
* msgVec
= new MsgVecType
;
97 msgVec
->resize(m_in_ports
, NULL
);
98 m_waiting_buffers
[addr
] = msgVec
;
100 DPRINTF(RubyQueue
, "stalling %s port %d addr %s\n", buf
, m_cur_in_port
,
102 assert(m_in_ports
> m_cur_in_port
);
103 (*(m_waiting_buffers
[addr
]))[m_cur_in_port
] = buf
;
107 AbstractController::wakeUpBuffers(Addr addr
)
109 if (m_waiting_buffers
.count(addr
) > 0) {
111 // Wake up all possible lower rank (i.e. lower priority) buffers that could
112 // be waiting on this message.
114 for (int in_port_rank
= m_cur_in_port
- 1;
117 if ((*(m_waiting_buffers
[addr
]))[in_port_rank
] != NULL
) {
118 (*(m_waiting_buffers
[addr
]))[in_port_rank
]->
119 reanalyzeMessages(addr
, clockEdge());
122 delete m_waiting_buffers
[addr
];
123 m_waiting_buffers
.erase(addr
);
128 AbstractController::wakeUpAllBuffers(Addr addr
)
130 if (m_waiting_buffers
.count(addr
) > 0) {
132 // Wake up all possible lower rank (i.e. lower priority) buffers that could
133 // be waiting on this message.
135 for (int in_port_rank
= m_in_ports
- 1;
138 if ((*(m_waiting_buffers
[addr
]))[in_port_rank
] != NULL
) {
139 (*(m_waiting_buffers
[addr
]))[in_port_rank
]->
140 reanalyzeMessages(addr
, clockEdge());
143 delete m_waiting_buffers
[addr
];
144 m_waiting_buffers
.erase(addr
);
149 AbstractController::wakeUpAllBuffers()
152 // Wake up all possible buffers that could be waiting on any message.
155 std::vector
<MsgVecType
*> wokeUpMsgVecs
;
156 MsgBufType wokeUpMsgBufs
;
158 if(m_waiting_buffers
.size() > 0) {
159 for (WaitingBufType::iterator buf_iter
= m_waiting_buffers
.begin();
160 buf_iter
!= m_waiting_buffers
.end();
162 for (MsgVecType::iterator vec_iter
= buf_iter
->second
->begin();
163 vec_iter
!= buf_iter
->second
->end();
166 // Make sure the MessageBuffer has not already be reanalyzed
168 if (*vec_iter
!= NULL
&&
169 (wokeUpMsgBufs
.count(*vec_iter
) == 0)) {
170 (*vec_iter
)->reanalyzeAllMessages(clockEdge());
171 wokeUpMsgBufs
.insert(*vec_iter
);
174 wokeUpMsgVecs
.push_back(buf_iter
->second
);
177 for (std::vector
<MsgVecType
*>::iterator wb_iter
= wokeUpMsgVecs
.begin();
178 wb_iter
!= wokeUpMsgVecs
.end();
183 m_waiting_buffers
.clear();
188 AbstractController::blockOnQueue(Addr addr
, MessageBuffer
* port
)
190 m_is_blocking
= true;
191 m_block_map
[addr
] = port
;
195 AbstractController::unblock(Addr addr
)
197 m_block_map
.erase(addr
);
198 if (m_block_map
.size() == 0) {
199 m_is_blocking
= false;
204 AbstractController::getMasterPort(const std::string
&if_name
,
211 AbstractController::queueMemoryRead(const MachineID
&id
, Addr addr
,
214 RequestPtr req
= new Request(addr
, RubySystem::getBlockSizeBytes(), 0,
217 PacketPtr pkt
= Packet::createRead(req
);
218 uint8_t *newData
= new uint8_t[RubySystem::getBlockSizeBytes()];
219 pkt
->dataDynamic(newData
);
221 SenderState
*s
= new SenderState(id
);
222 pkt
->pushSenderState(s
);
224 // Use functional rather than timing accesses during warmup
225 if (RubySystem::getWarmupEnabled()) {
226 memoryPort
.sendFunctional(pkt
);
231 memoryPort
.schedTimingReq(pkt
, clockEdge(latency
));
235 AbstractController::queueMemoryWrite(const MachineID
&id
, Addr addr
,
236 Cycles latency
, const DataBlock
&block
)
238 RequestPtr req
= new Request(addr
, RubySystem::getBlockSizeBytes(), 0,
241 PacketPtr pkt
= Packet::createWrite(req
);
242 uint8_t *newData
= new uint8_t[RubySystem::getBlockSizeBytes()];
243 pkt
->dataDynamic(newData
);
244 memcpy(newData
, block
.getData(0, RubySystem::getBlockSizeBytes()),
245 RubySystem::getBlockSizeBytes());
247 SenderState
*s
= new SenderState(id
);
248 pkt
->pushSenderState(s
);
250 // Use functional rather than timing accesses during warmup
251 if (RubySystem::getWarmupEnabled()) {
252 memoryPort
.sendFunctional(pkt
);
257 // Create a block and copy data from the block.
258 memoryPort
.schedTimingReq(pkt
, clockEdge(latency
));
262 AbstractController::queueMemoryWritePartial(const MachineID
&id
, Addr addr
,
264 const DataBlock
&block
, int size
)
266 RequestPtr req
= new Request(addr
, RubySystem::getBlockSizeBytes(), 0,
269 PacketPtr pkt
= Packet::createWrite(req
);
270 uint8_t *newData
= new uint8_t[size
];
271 pkt
->dataDynamic(newData
);
272 memcpy(newData
, block
.getData(getOffset(addr
), size
), size
);
274 SenderState
*s
= new SenderState(id
);
275 pkt
->pushSenderState(s
);
277 // Create a block and copy data from the block.
278 memoryPort
.schedTimingReq(pkt
, clockEdge(latency
));
282 AbstractController::functionalMemoryRead(PacketPtr pkt
)
284 memoryPort
.sendFunctional(pkt
);
288 AbstractController::functionalMemoryWrite(PacketPtr pkt
)
290 int num_functional_writes
= 0;
292 // Check the buffer from the controller to the memory.
293 if (memoryPort
.checkFunctional(pkt
)) {
294 num_functional_writes
++;
297 // Update memory itself.
298 memoryPort
.sendFunctional(pkt
);
299 return num_functional_writes
+ 1;
303 AbstractController::recvTimingResp(PacketPtr pkt
)
305 assert(getMemoryQueue());
306 assert(pkt
->isResponse());
308 std::shared_ptr
<MemoryMsg
> msg
= std::make_shared
<MemoryMsg
>(clockEdge());
309 (*msg
).m_addr
= pkt
->getAddr();
310 (*msg
).m_Sender
= m_machineID
;
312 SenderState
*s
= dynamic_cast<SenderState
*>(pkt
->senderState
);
313 (*msg
).m_OriginalRequestorMachId
= s
->id
;
317 (*msg
).m_Type
= MemoryRequestType_MEMORY_READ
;
318 (*msg
).m_MessageSize
= MessageSizeType_Response_Data
;
320 // Copy data from the packet
321 (*msg
).m_DataBlk
.setData(pkt
->getPtr
<uint8_t>(), 0,
322 RubySystem::getBlockSizeBytes());
323 } else if (pkt
->isWrite()) {
324 (*msg
).m_Type
= MemoryRequestType_MEMORY_WB
;
325 (*msg
).m_MessageSize
= MessageSizeType_Writeback_Control
;
327 panic("Incorrect packet type received from memory controller!");
330 getMemoryQueue()->enqueue(msg
, clockEdge(), cyclesToTicks(Cycles(1)));
335 AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt
)
337 controller
->recvTimingResp(pkt
);
341 AbstractController::MemoryPort::MemoryPort(const std::string
&_name
,
342 AbstractController
*_controller
,
343 const std::string
&_label
)
344 : QueuedMasterPort(_name
, _controller
, reqQueue
, snoopRespQueue
),
345 reqQueue(*_controller
, *this, _label
),
346 snoopRespQueue(*_controller
, *this, _label
),
347 controller(_controller
)