/*
 * Copyright (c) 2017,2019,2020 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/protocol/MemoryMsg.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : ClockedObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_id(p->system->getRequestorId(this)), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      m_mandatory_queue_latency(p->mandatory_queue_latency),
      memoryPort(csprintf("%s.memory", name()), this),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end())
{
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback([this]() { collateStats(); });
    }
}
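
// init() runs after all SimObjects have been constructed: it sizes the
// message delay histograms, registers this controller as the consumer of its
// memory request queue, and builds the address-range -> downstream-controller
// map that mapAddressToDownstreamMachine() consults.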
void
AbstractController::init()
{
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }

    if (getMemReqQueue()) {
        getMemReqQueue()->setConsumer(this);
    }

    // Initialize the addr->downstream machine mappings. Multiple machines
    // in downstream_destinations can have the same address range if they have
    // different types. If this is the case, mapAddressToDownstreamMachine
    // needs to specify the machine type
    downstreamDestinations.resize();
    for (auto abs_cntrl : params()->downstream_destinations) {
        MachineID mid = abs_cntrl->getMachineID();
        const AddrRangeList &ranges = abs_cntrl->getAddrRanges();
        for (const auto addr_range : ranges) {
            auto i = downstreamAddrMap.intersects(addr_range);
            if (i == downstreamAddrMap.end()) {
                i = downstreamAddrMap.insert(addr_range, AddrMapEntry());
            }
            AddrMapEntry &entry = i->second;
            fatal_if(entry.count(mid.getType()) > 0,
                     "%s: %s mapped to multiple machines of the same type\n",
                     name(), addr_range.to_string());
            entry[mid.getType()] = mid;
        }
        downstreamDestinations.add(mid);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    ClockedObject::regStats();

    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}
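
// Record the network delay seen by an incoming message, both in the
// aggregate histogram and in the per-virtual-network histogram.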
void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}
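
// Stall/wake-up machinery: when a transition cannot be completed, the
// (typically SLICC-generated) controller stalls the offending MessageBuffer
// on the current address. Stalled buffers are tracked per address, indexed
// by the rank of the input port they arrived on, and are reanalyzed by the
// wakeUp* methods below once the blocking condition clears.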
void
AbstractController::stallBuffer(MessageBuffer* buf, Addr addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %#x\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers that
        // could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible buffers that could be waiting on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}
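
// The argument-less overload reanalyzes every stalled buffer regardless of
// address, taking care not to reanalyze the same MessageBuffer twice.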
void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                //
                // Make sure the MessageBuffer has not already been reanalyzed
                //
                if (*vec_iter != NULL &&
                    (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                    (*vec_iter)->reanalyzeAllMessages(clockEdge());
                    wokeUpMsgBufs.insert(*vec_iter);
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}
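
// Service the head of the protocol's memory request queue: convert the Ruby
// MemoryMsg into a gem5 Packet and issue it through memoryPort. During warmup
// the access is performed functionally; a failed timing send simply
// reschedules the controller so the request is retried later.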
bool
AbstractController::serviceMemoryQueue()
{
    auto mem_queue = getMemReqQueue();
    assert(mem_queue);
    if (!mem_queue->isReady(clockEdge())) {
        return false;
    }

    const MemoryMsg *mem_msg = (const MemoryMsg*)mem_queue->peek();
    unsigned int req_size = RubySystem::getBlockSizeBytes();
    if (mem_msg->m_Len > 0) {
        req_size = mem_msg->m_Len;
    }

    RequestPtr req
        = std::make_shared<Request>(mem_msg->m_addr, req_size, 0, m_id);
    PacketPtr pkt;
    if (mem_msg->getType() == MemoryRequestType_MEMORY_WB) {
        pkt = Packet::createWrite(req);
        pkt->allocate();
        pkt->setData(mem_msg->m_DataBlk.getData(getOffset(mem_msg->m_addr),
            req_size));
    } else if (mem_msg->getType() == MemoryRequestType_MEMORY_READ) {
        pkt = Packet::createRead(req);
        uint8_t *newData = new uint8_t[req_size];
        pkt->dataDynamic(newData);
    } else {
        panic("Unknown memory request type (%s) for addr %p",
              MemoryRequestType_to_string(mem_msg->getType()),
              mem_msg->m_addr);
    }

    SenderState *s = new SenderState(mem_msg->m_Sender);
    pkt->pushSenderState(s);

    if (RubySystem::getWarmupEnabled()) {
        // Use functional rather than timing accesses during warmup
        mem_queue->dequeue(clockEdge());
        memoryPort.sendFunctional(pkt);
        // Since the queue was popped the controller may be able
        // to make more progress. Make sure it wakes up
        scheduleEvent(Cycles(1));
        recvTimingResp(pkt);
    } else if (memoryPort.sendTimingReq(pkt)) {
        mem_queue->dequeue(clockEdge());
        // Since the queue was popped the controller may be able
        // to make more progress. Make sure it wakes up
        scheduleEvent(Cycles(1));
    } else {
        scheduleEvent(Cycles(1));
        // The request was not accepted; free the packet and sender state,
        // they will be rebuilt when the message is serviced again.
        delete pkt;
        delete s;
    }

    return true;
}
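
// Address blocking: protocol code can block further processing of a given
// address on a particular queue until unblock() is called for that address.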
void
AbstractController::blockOnQueue(Addr addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

bool
AbstractController::isBlocked(Addr addr) const
{
    return m_is_blocking && (m_block_map.find(addr) != m_block_map.end());
}

void
AbstractController::unblock(Addr addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

bool
AbstractController::isBlocked(Addr addr)
{
    return (m_block_map.count(addr) > 0);
}

Port &
AbstractController::getPort(const std::string &if_name, PortID idx)
{
    return memoryPort;
}
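
// Functional accesses bypass timing entirely; these hooks are used on the
// functional access path when backing memory behind this controller must be
// read or updated directly.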
void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}
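
// Convert a response packet arriving from the memory system back into a Ruby
// MemoryMsg and enqueue it on the protocol's memory response queue.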
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(getMemRespQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_addr = pkt->getAddr();
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    getMemRespQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    delete pkt;
}

Tick
AbstractController::recvAtomic(PacketPtr pkt)
{
    return ticksToCycles(memoryPort.sendAtomic(pkt));
}

MachineID
AbstractController::mapAddressToMachine(Addr addr, MachineType mtype) const
{
    NodeID node = m_net_ptr->addressToNodeID(addr, mtype);
    MachineID mach = {mtype, node};
    return mach;
}
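
// Look up the downstream controller that owns 'addr' in the map built by
// init(). Passing MachineType_NUM acts as a wildcard and is only legal when a
// single machine type maps the address; otherwise mtype disambiguates between
// machines sharing the same range.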
MachineID
AbstractController::mapAddressToDownstreamMachine(Addr addr, MachineType mtype)
const
{
    const auto i = downstreamAddrMap.contains(addr);
    fatal_if(i == downstreamAddrMap.end(),
             "%s: couldn't find mapping for address %x\n", name(), addr);

    const AddrMapEntry &entry = i->second;
    assert(!entry.empty());

    if (mtype == MachineType_NUM) {
        fatal_if(entry.size() > 1,
                 "%s: address %x mapped to multiple machine types.\n",
                 name(), addr);
        return entry.begin()->second;
    } else {
        auto j = entry.find(mtype);
        fatal_if(j == entry.end(),
                 "%s: couldn't find mapping for address %x\n", name(), addr);
        return j->second;
    }
}
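
// The MemoryPort simply forwards responses and retry notifications from the
// memory side back to the owning controller.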
bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

void
AbstractController::MemoryPort::recvReqRetry()
{
    controller->serviceMemoryQueue();
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           PortID id)
    : RequestPort(_name, _controller, id), controller(_controller)
{
}