mem-ruby: more specialized address to node mapping
src/mem/ruby/slicc_interface/AbstractController.cc
/*
 * Copyright (c) 2017,2019,2020 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/protocol/MemoryMsg.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : ClockedObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_id(p->system->getRequestorId(this)), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      m_mandatory_queue_latency(p->mandatory_queue_latency),
      memoryPort(csprintf("%s.memory", name()), this),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end())
{
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
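        // Only the version-0 instance of each controller type registers
        // this callback, so the per-type collation runs exactly once per
        // stats dump rather than once per controller.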
        Stats::registerDumpCallback([this]() { collateStats(); });
    }
}

void
AbstractController::init()
{
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }

    if (getMemReqQueue()) {
        getMemReqQueue()->setConsumer(this);
    }

    // Initialize the addr->downstream machine mappings. Multiple machines
    // in downstream_destinations can have the same address range if they
    // have different types. In that case, callers of
    // mapAddressToDownstreamMachine must specify the machine type.
    downstreamDestinations.resize();
    for (auto abs_cntrl : params()->downstream_destinations) {
        MachineID mid = abs_cntrl->getMachineID();
        const AddrRangeList &ranges = abs_cntrl->getAddrRanges();
        for (const auto &addr_range : ranges) {
            auto i = downstreamAddrMap.intersects(addr_range);
            if (i == downstreamAddrMap.end()) {
                i = downstreamAddrMap.insert(addr_range, AddrMapEntry());
            }
            AddrMapEntry &entry = i->second;
            fatal_if(entry.count(mid.getType()) > 0,
                     "%s: %s mapped to multiple machines of the same type\n",
                     name(), addr_range.to_string());
            entry[mid.getType()] = mid;
        }
        downstreamDestinations.add(mid);
    }
}
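
// Example (hypothetical types and range): if downstream_destinations holds
// both a Directory and a Memory controller covering [0x0, 0x80000000), the
// map gets one entry for that range whose AddrMapEntry holds two MachineIDs
// keyed by MachineType. Lookups on such a range must name the type they
// want; see mapAddressToDownstreamMachine below.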

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    ClockedObject::regStats();

    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

void
AbstractController::stallBuffer(MessageBuffer* buf, Addr addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %#x\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}
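
// Note: in_port "rank" below follows the SLICC in_port priority ordering;
// m_cur_in_port is the rank of the port whose message is currently being
// processed. wakeUpBuffers only re-analyzes strictly lower-rank ports, so
// the port that just stalled is not immediately woken again.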

void
AbstractController::wakeUpBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers
        // that could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible buffers that could be waiting on this
        // message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;
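
    // The same MessageBuffer can be stalled under several addresses;
    // wokeUpMsgBufs tracks which buffers have already been re-analyzed so
    // each one is woken at most once, while wokeUpMsgVecs collects the
    // per-address vectors so they can be freed after the iteration.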

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                //
                // Make sure the MessageBuffer has not already been
                // reanalyzed
                //
                if (*vec_iter != NULL &&
                    (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                    (*vec_iter)->reanalyzeAllMessages(clockEdge());
                    wokeUpMsgBufs.insert(*vec_iter);
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter =
                 wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

bool
AbstractController::serviceMemoryQueue()
{
    auto mem_queue = getMemReqQueue();
    assert(mem_queue);
    if (!mem_queue->isReady(clockEdge())) {
        return false;
    }

    const MemoryMsg *mem_msg = (const MemoryMsg*)mem_queue->peek();
    unsigned int req_size = RubySystem::getBlockSizeBytes();
    if (mem_msg->m_Len > 0) {
        req_size = mem_msg->m_Len;
    }

    RequestPtr req
        = std::make_shared<Request>(mem_msg->m_addr, req_size, 0, m_id);
    PacketPtr pkt;
    if (mem_msg->getType() == MemoryRequestType_MEMORY_WB) {
        pkt = Packet::createWrite(req);
        pkt->allocate();
        pkt->setData(mem_msg->m_DataBlk.getData(getOffset(mem_msg->m_addr),
                                                req_size));
    } else if (mem_msg->getType() == MemoryRequestType_MEMORY_READ) {
        pkt = Packet::createRead(req);
        uint8_t *newData = new uint8_t[req_size];
        pkt->dataDynamic(newData);
    } else {
        panic("Unknown memory request type (%s) for addr %p",
              MemoryRequestType_to_string(mem_msg->getType()),
              mem_msg->m_addr);
    }

    SenderState *s = new SenderState(mem_msg->m_Sender);
    pkt->pushSenderState(s);

    if (RubySystem::getWarmupEnabled()) {
        // Use functional rather than timing accesses during warmup
        mem_queue->dequeue(clockEdge());
        memoryPort.sendFunctional(pkt);
        // Since the queue was popped the controller may be able
        // to make more progress. Make sure it wakes up
        scheduleEvent(Cycles(1));
        recvTimingResp(pkt);
    } else if (memoryPort.sendTimingReq(pkt)) {
        mem_queue->dequeue(clockEdge());
        // Since the queue was popped the controller may be able
        // to make more progress. Make sure it wakes up
        scheduleEvent(Cycles(1));
    } else {
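        // The timing request was rejected. Under gem5's port flow
        // control, a false return from sendTimingReq means the peer is
        // busy and will call recvReqRetry() later, which re-enters
        // serviceMemoryQueue(). The message stays at the head of
        // mem_queue, so the packet and sender state are freed here and
        // rebuilt on the retry.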
        scheduleEvent(Cycles(1));
        delete pkt;
        delete s;
    }

    return true;
}

void
AbstractController::blockOnQueue(Addr addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

bool
AbstractController::isBlocked(Addr addr) const
{
    return m_is_blocking && (m_block_map.find(addr) != m_block_map.end());
}

void
AbstractController::unblock(Addr addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

bool
AbstractController::isBlocked(Addr addr)
{
    return (m_block_map.count(addr) > 0);
}

Port &
AbstractController::getPort(const std::string &if_name, PortID idx)
{
    return memoryPort;
}

void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
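    // The return value counts functional writes performed here; the
    // write to memory above always succeeds, hence the +1.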
    return num_functional_writes + 1;
}

void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(getMemRespQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_addr = pkt->getAddr();
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    getMemRespQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    delete pkt;
}

Tick
AbstractController::recvAtomic(PacketPtr pkt)
{
    return ticksToCycles(memoryPort.sendAtomic(pkt));
}

MachineID
AbstractController::mapAddressToMachine(Addr addr, MachineType mtype) const
{
    NodeID node = m_net_ptr->addressToNodeID(addr, mtype);
    MachineID mach = {mtype, node};
    return mach;
}
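
// Unlike mapAddressToMachine, which consults the network's global
// address-to-node mapping, mapAddressToDownstreamMachine only considers
// the controllers listed in this controller's downstream_destinations
// parameter.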

MachineID
AbstractController::mapAddressToDownstreamMachine(Addr addr, MachineType mtype)
const
{
    const auto i = downstreamAddrMap.contains(addr);
    fatal_if(i == downstreamAddrMap.end(),
             "%s: couldn't find mapping for address %x\n", name(), addr);

    const AddrMapEntry &entry = i->second;
    assert(!entry.empty());

    if (mtype == MachineType_NUM) {
        fatal_if(entry.size() > 1,
                 "%s: address %x mapped to multiple machine types.\n",
                 name(), addr);
        return entry.begin()->second;
    } else {
        auto j = entry.find(mtype);
        fatal_if(j == entry.end(),
                 "%s: couldn't find mapping for address %x\n", name(), addr);
        return j->second;
    }
}
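
// Usage sketch (MachineType_Directory is a hypothetical protocol machine
// type): a controller that knows the home of a line is a directory would
// resolve it with
//     MachineID dest =
//         mapAddressToDownstreamMachine(addr, MachineType_Directory);
// Passing MachineType_NUM instead means "whichever single machine owns
// addr", and is a fatal error if machines of more than one type cover the
// matching range.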

bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

void
AbstractController::MemoryPort::recvReqRetry()
{
    controller->serviceMemoryQueue();
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           PortID id)
    : RequestPort(_name, _controller, id), controller(_controller)
{
}