/*
 * Copyright (c) 2019,2020 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/network/MessageBuffer.hh"

#include <cassert>

#include "base/cprintf.hh"
#include "base/logging.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/system/RubySystem.hh"

using namespace std;
using m5::stl_helpers::operator<<;

MessageBuffer::MessageBuffer(const Params &p)
    : SimObject(p), m_stall_map_size(0),
    m_max_size(p.buffer_size), m_time_last_time_size_checked(0),
    m_time_last_time_enqueue(0), m_time_last_time_pop(0),
    m_last_arrival_time(0), m_strict_fifo(p.ordered),
    m_randomization(p.randomization),
    m_allow_zero_latency(p.allow_zero_latency)
{
    m_msg_counter = 0;
    m_consumer = NULL;
    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_stalled_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_priority_rank = 0;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;

    m_buf_msgs = 0;
    m_stall_time = 0;

    m_dequeue_callback = nullptr;
}

unsigned int
MessageBuffer::getSize(Tick curTime)
{
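    // Sample the heap size at most once per tick and cache it, so every
    // caller within the same tick observes one consistent size.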
    if (m_time_last_time_size_checked != curTime) {
        m_time_last_time_size_checked = curTime;
        m_size_last_time_size_checked = m_prio_heap.size();
    }

    return m_size_last_time_size_checked;
}

bool
MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
{
    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // determine the correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until the scheduled cycle, but enqueue operations affect the
    // visible size immediately
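    // For example: with m_max_size == 4, if the cycle began with 3
    // messages and one was popped and one enqueued this cycle, the
    // network still sees 3 + 1 = 4 entries, so a request for one more
    // slot is denied until the next cycle.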
    unsigned int current_size = 0;
    unsigned int current_stall_size = 0;

    if (m_time_last_time_pop < current_time) {
        // no pops this cycle - heap and stall queue size is correct
        current_size = m_prio_heap.size();
        current_stall_size = m_stall_map_size;
    } else {
        if (m_time_last_time_enqueue < current_time) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add newly
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }

        // use the stall queue size recorded at the start of the cycle
        current_stall_size = m_stalled_at_cycle_start;
    }

    // now compare the new size with our max size
    if (current_size + current_stall_size + n <= m_max_size) {
        return true;
    } else {
        DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, "
                "m_max_size: %d\n",
                n, current_size + current_stall_size,
                m_prio_heap.size(), m_max_size);
        m_not_avail_count++;
        return false;
    }
}

const Message*
MessageBuffer::peek() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
    const Message* msg_ptr = m_prio_heap.front().get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

// FIXME - move me somewhere else
Tick
random_time()
{
    Tick time = 1;
    time += random_mt.random(0, 3); // [0...3]
    if (random_mt.random(0, 7) == 0) { // 1 in 8 chance
        time += 100 + random_mt.random(1, 15); // 100 + [1...15]
    }
    return time;
}
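// The expected value of the delay above is 1 + 1.5 + (1/8) * (100 + 8)
// = 16 ticks: a small uniform jitter most of the time, plus an
// occasional long delay to shake loose timing-dependent behavior.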

void
MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
{
    // record current time in case we have a pop that also adjusts my size
    if (m_time_last_time_enqueue < current_time) {
        m_msgs_this_cycle = 0; // first msg this cycle
        m_time_last_time_enqueue = current_time;
    }

    m_msg_counter++;
    m_msgs_this_cycle++;

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    assert((delta > 0) || m_allow_zero_latency);
    Tick arrival_time = 0;

    // random delays are inserted if the RubySystem level randomization flag
    // is turned on and this buffer allows it
    if ((m_randomization == MessageRandomization::disabled) ||
        ((m_randomization == MessageRandomization::ruby_system) &&
         !RubySystem::getRandomization())) {
        // No randomization
        arrival_time = current_time + delta;
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
            arrival_time = m_last_arrival_time + random_time();
        } else {
            arrival_time = current_time + random_time();
        }
    }

    // Check the arrival time
    assert(arrival_time >= current_time);
    if (m_strict_fifo) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
                  *this, name(), current_time, delta, arrival_time,
                  m_last_arrival_time);
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!RubySystem::getWarmupEnabled()) {
        m_last_arrival_time = arrival_time;
    }

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

    assert(current_time >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

    msg_ptr->updateDelayedTicks(current_time);
    msg_ptr->setLastEnqueueTime(arrival_time);
    msg_ptr->setMsgCounter(m_msg_counter);

    // Insert the message into the priority heap
    m_prio_heap.push_back(message);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    // Increment the number of messages statistic
    m_buf_msgs++;

    assert((m_max_size == 0) ||
           ((m_prio_heap.size() + m_stall_map_size) <= m_max_size));

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time, *(message.get()));

    // Schedule the wakeup
    assert(m_consumer != NULL);
    m_consumer->scheduleEventAbsolute(arrival_time);
    m_consumer->storeEventInfo(m_vnet_id);
}
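// In the common (non-randomized) case, enqueue(msg, t, d) therefore makes
// the message visible to the consumer at tick t + d. With randomization
// enabled, delta is replaced by random_time(), and strict-FIFO buffers
// additionally base the arrival on m_last_arrival_time so that arrival
// order still matches enqueue order.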

Tick
MessageBuffer::dequeue(Tick current_time, bool decrement_messages)
{
    DPRINTF(RubyQueue, "Popping\n");
    assert(isReady(current_time));

    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front();

    // get the delay cycles
    message->updateDelayedTicks(current_time);
    Tick delay = message->getDelayedTicks();

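    // record how long this message waited, from its timestamp to now,
    // for the avg_stall_time statistic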
    m_stall_time = curTick() - message->getTime();

    // record previous size and time so the current buffer size isn't
    // adjusted until the scheduled cycle
    if (m_time_last_time_pop < current_time) {
        m_size_at_cycle_start = m_prio_heap.size();
        m_stalled_at_cycle_start = m_stall_map_size;
        m_time_last_time_pop = current_time;
    }

    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_prio_heap.pop_back();
    if (decrement_messages) {
        // If the message will be removed from the queue, decrement the
        // number of messages in the queue.
        m_buf_msgs--;
    }

    // if a dequeue callback was requested, call it now
    if (m_dequeue_callback) {
        m_dequeue_callback();
    }

    return delay;
}

void
MessageBuffer::registerDequeueCallback(std::function<void()> callback)
{
    m_dequeue_callback = callback;
}

void
MessageBuffer::unregisterDequeueCallback()
{
    m_dequeue_callback = nullptr;
}
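// A minimal usage sketch (the caller and signalDequeue() below are
// hypothetical, not part of this class): a consumer waiting on this
// buffer can register a lambda and drop it when no longer interested:
//
//     buffer->registerDequeueCallback([this]() { signalDequeue(); });
//     ...
//     buffer->unregisterDequeueCallback();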

void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
    m_time_last_time_enqueue = 0;
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_stalled_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

void
MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
{
    DPRINTF(RubyQueue, "Recycling.\n");
    assert(isReady(current_time));
    MsgPtr node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

    Tick future_time = current_time + recycle_latency;
    node->setLastEnqueueTime(future_time);

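    // pop_heap left the old head at the back of the vector; overwrite it
    // with the re-timestamped message and restore the heap invariant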
    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_consumer->scheduleEventAbsolute(future_time);
}

void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
    while (!lt.empty()) {
        MsgPtr m = lt.front();
        assert(m->getLastEnqueueTime() <= schdTick);

        m_prio_heap.push_back(m);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  greater<MsgPtr>());

        m_consumer->scheduleEventAbsolute(schdTick);

        DPRINTF(RubyQueue, "Requeue arrival_time: %lld, Message: %s\n",
                schdTick, *(m.get()));

        lt.pop_front();
    }
}

void
MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages %#x\n", addr);
    assert(m_stall_msg_map.count(addr) > 0);

    //
    // Put all stalled messages associated with this address back on the
    // prio heap. The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled messages
    // will be observed before any younger messages that may arrive this cycle
    //
    m_stall_map_size -= m_stall_msg_map[addr].size();
    assert(m_stall_map_size >= 0);
    reanalyzeList(m_stall_msg_map[addr], current_time);
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages(Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");

    //
    // Put all stalled messages, for every address, back on the prio heap.
    // The reanalyzeList call will make sure the consumer is scheduled for
    // the current cycle so that the previously stalled messages will be
    // observed before any younger messages that may arrive this cycle.
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
        m_stall_map_size -= map_iter->second.size();
        assert(m_stall_map_size >= 0);
        reanalyzeList(map_iter->second, current_time);
    }
    m_stall_msg_map.clear();
}

void
MessageBuffer::stallMessage(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "Stalling due to %#x\n", addr);
    assert(isReady(current_time));
    assert(getOffset(addr) == 0);
    MsgPtr message = m_prio_heap.front();

    // Since the message will just be moved to the stall map, indicate that
    // the buffer should not decrement the m_buf_msgs statistic
    dequeue(current_time, false);

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead the controller is responsible for calling reanalyzeMessages
    // when these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
    m_stall_map_size++;
    m_stall_count++;
}
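// Sketch of the stall/reanalyze protocol (the in_port name below is
// illustrative; SLICC-generated controllers invoke these through their
// generated port wrappers):
//
//     in_port.stallMessage(addr, curTick());       // head msg blocked on addr
//     ...                                          // state changes for addr
//     in_port.reanalyzeMessages(addr, curTick());  // wake the stalled msgs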

bool
MessageBuffer::hasStalledMsg(Addr addr) const
{
    return (m_stall_msg_map.count(addr) != 0);
}

void
MessageBuffer::deferEnqueueingMessage(Addr addr, MsgPtr message)
{
    DPRINTF(RubyQueue, "Deferring enqueueing message: %s, Address %#x\n",
            *(message.get()), addr);
    (m_deferred_msg_map[addr]).push_back(message);
}

void
MessageBuffer::enqueueDeferredMessages(Addr addr, Tick curTime, Tick delay)
{
    assert(!isDeferredMsgMapEmpty(addr));
    std::vector<MsgPtr>& msg_vec = m_deferred_msg_map[addr];
    assert(msg_vec.size() > 0);

    // enqueue all deferred messages associated with this address
    for (MsgPtr m : msg_vec) {
        enqueue(m, curTime, delay);
    }

    msg_vec.clear();
    m_deferred_msg_map.erase(addr);
}

bool
MessageBuffer::isDeferredMsgMapEmpty(Addr addr) const
{
    return m_deferred_msg_map.count(addr) == 0;
}
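// Sketch of the deferred-enqueue pattern (names illustrative): hold a
// message aside until its address is ready, then enqueue everything that
// accumulated for that address in one shot:
//
//     buffer->deferEnqueueingMessage(addr, msg);
//     ...
//     if (!buffer->isDeferredMsgMapEmpty(addr))
//         buffer->enqueueDeferredMessages(addr, curTick(), latency);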

void
MessageBuffer::print(ostream& out) const
{
    ccprintf(out, "[MessageBuffer: ");
    if (m_consumer != NULL) {
        ccprintf(out, " consumer-yes ");
    }

    vector<MsgPtr> copy(m_prio_heap);
    sort_heap(copy.begin(), copy.end(), greater<MsgPtr>());
    ccprintf(out, "%s] %s", copy, name());
}

bool
MessageBuffer::isReady(Tick current_time) const
{
    return ((m_prio_heap.size() > 0) &&
            (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
}

void
MessageBuffer::regStats()
{
    m_not_avail_count
        .name(name() + ".not_avail_count")
        .desc("Number of times this buffer did not have N slots available")
        .flags(Stats::nozero);

    m_buf_msgs
        .name(name() + ".avg_buf_msgs")
        .desc("Average number of messages in buffer")
        .flags(Stats::nozero);

    m_stall_count
        .name(name() + ".num_msg_stalls")
        .desc("Number of times messages were stalled")
        .flags(Stats::nozero);

    m_occupancy
        .name(name() + ".avg_buf_occ")
        .desc("Average occupancy of buffer capacity")
        .flags(Stats::nozero);

    m_stall_time
        .name(name() + ".avg_stall_time")
        .desc("Average number of cycles messages are stalled in this MB")
        .flags(Stats::nozero);

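    // m_occupancy is a formula stat: average buffered messages divided by
    // capacity. Infinite buffers (m_max_size == 0) report 0 occupancy.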
    if (m_max_size > 0) {
        m_occupancy = m_buf_msgs / m_max_size;
    } else {
        m_occupancy = 0;
    }
}

uint32_t
MessageBuffer::functionalAccess(Packet *pkt, bool is_read)
{
    DPRINTF(RubyQueue, "functional %s for %#x\n",
            is_read ? "read" : "write", pkt->getAddr());

    uint32_t num_functional_accesses = 0;
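    // Note: a functional read is satisfied by the first message holding
    // valid data for the address (return on first hit); a functional
    // write must update every buffered copy, so the loops below keep
    // scanning on writes and count each message they touch.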

    // Check the priority heap and write any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].get();
        if (is_read && msg->functionalRead(pkt))
            return 1;
        else if (!is_read && msg->functionalWrite(pkt))
            num_functional_accesses++;
    }

    // Check the stall queue and write any messages that may
    // correspond to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
             it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (is_read && msg->functionalRead(pkt))
                return 1;
            else if (!is_read && msg->functionalWrite(pkt))
                num_functional_accesses++;
        }
    }

    return num_functional_accesses;
}

MessageBuffer *
MessageBufferParams::create() const
{
    return new MessageBuffer(*this);
}