/*
 * Copyright (c) 2019,2020 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "mem/ruby/network/MessageBuffer.hh"

#include <cassert>

#include "base/cprintf.hh"
#include "base/logging.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/system/RubySystem.hh"
53 using m5::stl_helpers::operator<<;
55 MessageBuffer::MessageBuffer(const Params
&p
)
56 : SimObject(p
), m_stall_map_size(0),
57 m_max_size(p
.buffer_size
), m_time_last_time_size_checked(0),
58 m_time_last_time_enqueue(0), m_time_last_time_pop(0),
59 m_last_arrival_time(0), m_strict_fifo(p
.ordered
),
60 m_randomization(p
.randomization
),
61 m_allow_zero_latency(p
.allow_zero_latency
)
65 m_size_last_time_size_checked
= 0;
66 m_size_at_cycle_start
= 0;
67 m_stalled_at_cycle_start
= 0;
68 m_msgs_this_cycle
= 0;
71 m_stall_msg_map
.clear();
78 m_dequeue_callback
= nullptr;
82 MessageBuffer::getSize(Tick curTime
)
84 if (m_time_last_time_size_checked
!= curTime
) {
85 m_time_last_time_size_checked
= curTime
;
86 m_size_last_time_size_checked
= m_prio_heap
.size();
89 return m_size_last_time_size_checked
;
93 MessageBuffer::areNSlotsAvailable(unsigned int n
, Tick current_time
)
96 // fast path when message buffers have infinite size
97 if (m_max_size
== 0) {
101 // determine the correct size for the current cycle
102 // pop operations shouldn't effect the network's visible size
103 // until schd cycle, but enqueue operations effect the visible
105 unsigned int current_size
= 0;
106 unsigned int current_stall_size
= 0;
108 if (m_time_last_time_pop
< current_time
) {
109 // no pops this cycle - heap and stall queue size is correct
110 current_size
= m_prio_heap
.size();
111 current_stall_size
= m_stall_map_size
;
113 if (m_time_last_time_enqueue
< current_time
) {
114 // no enqueues this cycle - m_size_at_cycle_start is correct
115 current_size
= m_size_at_cycle_start
;
117 // both pops and enqueues occured this cycle - add new
118 // enqueued msgs to m_size_at_cycle_start
119 current_size
= m_size_at_cycle_start
+ m_msgs_this_cycle
;
122 // Stall queue size at start is considered
123 current_stall_size
= m_stalled_at_cycle_start
;
126 // now compare the new size with our max size
127 if (current_size
+ current_stall_size
+ n
<= m_max_size
) {
130 DPRINTF(RubyQueue
, "n: %d, current_size: %d, heap size: %d, "
132 n
, current_size
+ current_stall_size
,
133 m_prio_heap
.size(), m_max_size
);
140 MessageBuffer::peek() const
142 DPRINTF(RubyQueue
, "Peeking at head of queue.\n");
143 const Message
* msg_ptr
= m_prio_heap
.front().get();
146 DPRINTF(RubyQueue
, "Message: %s\n", (*msg_ptr
));
150 // FIXME - move me somewhere else
155 time
+= random_mt
.random(0, 3); // [0...3]
156 if (random_mt
.random(0, 7) == 0) { // 1 in 8 chance
157 time
+= 100 + random_mt
.random(1, 15); // 100 + [1...15]
163 MessageBuffer::enqueue(MsgPtr message
, Tick current_time
, Tick delta
)
165 // record current time incase we have a pop that also adjusts my size
166 if (m_time_last_time_enqueue
< current_time
) {
167 m_msgs_this_cycle
= 0; // first msg this cycle
168 m_time_last_time_enqueue
= current_time
;
174 // Calculate the arrival time of the message, that is, the first
175 // cycle the message can be dequeued.
176 panic_if((delta
== 0) && !m_allow_zero_latency
,
177 "Delta equals zero and allow_zero_latency is false during enqueue");
178 Tick arrival_time
= 0;
180 // random delays are inserted if the RubySystem level randomization flag
181 // is turned on and this buffer allows it
182 if ((m_randomization
== MessageRandomization::disabled
) ||
183 ((m_randomization
== MessageRandomization::ruby_system
) &&
184 !RubySystem::getRandomization())) {
186 arrival_time
= current_time
+ delta
;
188 // Randomization - ignore delta
190 if (m_last_arrival_time
< current_time
) {
191 m_last_arrival_time
= current_time
;
193 arrival_time
= m_last_arrival_time
+ random_time();
195 arrival_time
= current_time
+ random_time();
199 // Check the arrival time
200 assert(arrival_time
>= current_time
);
202 if (arrival_time
< m_last_arrival_time
) {
203 panic("FIFO ordering violated: %s name: %s current time: %d "
204 "delta: %d arrival_time: %d last arrival_time: %d\n",
205 *this, name(), current_time
, delta
, arrival_time
,
206 m_last_arrival_time
);
210 // If running a cache trace, don't worry about the last arrival checks
211 if (!RubySystem::getWarmupEnabled()) {
212 m_last_arrival_time
= arrival_time
;
215 // compute the delay cycles and set enqueue time
216 Message
* msg_ptr
= message
.get();
217 assert(msg_ptr
!= NULL
);
219 assert(current_time
>= msg_ptr
->getLastEnqueueTime() &&
220 "ensure we aren't dequeued early");
222 msg_ptr
->updateDelayedTicks(current_time
);
223 msg_ptr
->setLastEnqueueTime(arrival_time
);
224 msg_ptr
->setMsgCounter(m_msg_counter
);
226 // Insert the message into the priority heap
227 m_prio_heap
.push_back(message
);
228 push_heap(m_prio_heap
.begin(), m_prio_heap
.end(), greater
<MsgPtr
>());
229 // Increment the number of messages statistic
232 assert((m_max_size
== 0) ||
233 ((m_prio_heap
.size() + m_stall_map_size
) <= m_max_size
));
235 DPRINTF(RubyQueue
, "Enqueue arrival_time: %lld, Message: %s\n",
236 arrival_time
, *(message
.get()));
238 // Schedule the wakeup
239 assert(m_consumer
!= NULL
);
240 m_consumer
->scheduleEventAbsolute(arrival_time
);
241 m_consumer
->storeEventInfo(m_vnet_id
);
245 MessageBuffer::dequeue(Tick current_time
, bool decrement_messages
)
247 DPRINTF(RubyQueue
, "Popping\n");
248 assert(isReady(current_time
));
250 // get MsgPtr of the message about to be dequeued
251 MsgPtr message
= m_prio_heap
.front();
253 // get the delay cycles
254 message
->updateDelayedTicks(current_time
);
255 Tick delay
= message
->getDelayedTicks();
257 m_stall_time
= curTick() - message
->getTime();
259 // record previous size and time so the current buffer size isn't
260 // adjusted until schd cycle
261 if (m_time_last_time_pop
< current_time
) {
262 m_size_at_cycle_start
= m_prio_heap
.size();
263 m_stalled_at_cycle_start
= m_stall_map_size
;
264 m_time_last_time_pop
= current_time
;
267 pop_heap(m_prio_heap
.begin(), m_prio_heap
.end(), greater
<MsgPtr
>());
268 m_prio_heap
.pop_back();
269 if (decrement_messages
) {
270 // If the message will be removed from the queue, decrement the
271 // number of message in the queue.
275 // if a dequeue callback was requested, call it now
276 if (m_dequeue_callback
) {
277 m_dequeue_callback();
284 MessageBuffer::registerDequeueCallback(std::function
<void()> callback
)
286 m_dequeue_callback
= callback
;
290 MessageBuffer::unregisterDequeueCallback()
292 m_dequeue_callback
= nullptr;
296 MessageBuffer::clear()
301 m_time_last_time_enqueue
= 0;
302 m_time_last_time_pop
= 0;
303 m_size_at_cycle_start
= 0;
304 m_stalled_at_cycle_start
= 0;
305 m_msgs_this_cycle
= 0;
309 MessageBuffer::recycle(Tick current_time
, Tick recycle_latency
)
311 DPRINTF(RubyQueue
, "Recycling.\n");
312 assert(isReady(current_time
));
313 MsgPtr node
= m_prio_heap
.front();
314 pop_heap(m_prio_heap
.begin(), m_prio_heap
.end(), greater
<MsgPtr
>());
316 Tick future_time
= current_time
+ recycle_latency
;
317 node
->setLastEnqueueTime(future_time
);
319 m_prio_heap
.back() = node
;
320 push_heap(m_prio_heap
.begin(), m_prio_heap
.end(), greater
<MsgPtr
>());
321 m_consumer
->scheduleEventAbsolute(future_time
);
325 MessageBuffer::reanalyzeList(list
<MsgPtr
> <
, Tick schdTick
)
327 while (!lt
.empty()) {
328 MsgPtr m
= lt
.front();
329 assert(m
->getLastEnqueueTime() <= schdTick
);
331 m_prio_heap
.push_back(m
);
332 push_heap(m_prio_heap
.begin(), m_prio_heap
.end(),
335 m_consumer
->scheduleEventAbsolute(schdTick
);
337 DPRINTF(RubyQueue
, "Requeue arrival_time: %lld, Message: %s\n",
338 schdTick
, *(m
.get()));
345 MessageBuffer::reanalyzeMessages(Addr addr
, Tick current_time
)
347 DPRINTF(RubyQueue
, "ReanalyzeMessages %#x\n", addr
);
348 assert(m_stall_msg_map
.count(addr
) > 0);
351 // Put all stalled messages associated with this address back on the
352 // prio heap. The reanalyzeList call will make sure the consumer is
353 // scheduled for the current cycle so that the previously stalled messages
354 // will be observed before any younger messages that may arrive this cycle
356 m_stall_map_size
-= m_stall_msg_map
[addr
].size();
357 assert(m_stall_map_size
>= 0);
358 reanalyzeList(m_stall_msg_map
[addr
], current_time
);
359 m_stall_msg_map
.erase(addr
);
363 MessageBuffer::reanalyzeAllMessages(Tick current_time
)
365 DPRINTF(RubyQueue
, "ReanalyzeAllMessages\n");
368 // Put all stalled messages associated with this address back on the
369 // prio heap. The reanalyzeList call will make sure the consumer is
370 // scheduled for the current cycle so that the previously stalled messages
371 // will be observed before any younger messages that may arrive this cycle.
373 for (StallMsgMapType::iterator map_iter
= m_stall_msg_map
.begin();
374 map_iter
!= m_stall_msg_map
.end(); ++map_iter
) {
375 m_stall_map_size
-= map_iter
->second
.size();
376 assert(m_stall_map_size
>= 0);
377 reanalyzeList(map_iter
->second
, current_time
);
379 m_stall_msg_map
.clear();
383 MessageBuffer::stallMessage(Addr addr
, Tick current_time
)
385 DPRINTF(RubyQueue
, "Stalling due to %#x\n", addr
);
386 assert(isReady(current_time
));
387 assert(getOffset(addr
) == 0);
388 MsgPtr message
= m_prio_heap
.front();
390 // Since the message will just be moved to stall map, indicate that the
391 // buffer should not decrement the m_buf_msgs statistic
392 dequeue(current_time
, false);
395 // Note: no event is scheduled to analyze the map at a later time.
396 // Instead the controller is responsible to call reanalyzeMessages when
397 // these addresses change state.
399 (m_stall_msg_map
[addr
]).push_back(message
);
405 MessageBuffer::hasStalledMsg(Addr addr
) const
407 return (m_stall_msg_map
.count(addr
) != 0);
411 MessageBuffer::deferEnqueueingMessage(Addr addr
, MsgPtr message
)
413 DPRINTF(RubyQueue
, "Deferring enqueueing message: %s, Address %#x\n",
414 *(message
.get()), addr
);
415 (m_deferred_msg_map
[addr
]).push_back(message
);
419 MessageBuffer::enqueueDeferredMessages(Addr addr
, Tick curTime
, Tick delay
)
421 assert(!isDeferredMsgMapEmpty(addr
));
422 std::vector
<MsgPtr
>& msg_vec
= m_deferred_msg_map
[addr
];
423 assert(msg_vec
.size() > 0);
425 // enqueue all deferred messages associated with this address
426 for (MsgPtr m
: msg_vec
) {
427 enqueue(m
, curTime
, delay
);
431 m_deferred_msg_map
.erase(addr
);
435 MessageBuffer::isDeferredMsgMapEmpty(Addr addr
) const
437 return m_deferred_msg_map
.count(addr
) == 0;
441 MessageBuffer::print(ostream
& out
) const
443 ccprintf(out
, "[MessageBuffer: ");
444 if (m_consumer
!= NULL
) {
445 ccprintf(out
, " consumer-yes ");
448 vector
<MsgPtr
> copy(m_prio_heap
);
449 sort_heap(copy
.begin(), copy
.end(), greater
<MsgPtr
>());
450 ccprintf(out
, "%s] %s", copy
, name());
454 MessageBuffer::isReady(Tick current_time
) const
456 return ((m_prio_heap
.size() > 0) &&
457 (m_prio_heap
.front()->getLastEnqueueTime() <= current_time
));
461 MessageBuffer::regStats()
464 .name(name() + ".not_avail_count")
465 .desc("Number of times this buffer did not have N slots available")
466 .flags(Stats::nozero
);
469 .name(name() + ".avg_buf_msgs")
470 .desc("Average number of messages in buffer")
471 .flags(Stats::nozero
);
474 .name(name() + ".num_msg_stalls")
475 .desc("Number of times messages were stalled")
476 .flags(Stats::nozero
);
479 .name(name() + ".avg_buf_occ")
480 .desc("Average occupancy of buffer capacity")
481 .flags(Stats::nozero
);
484 .name(name() + ".avg_stall_time")
485 .desc("Average number of cycles messages are stalled in this MB")
486 .flags(Stats::nozero
);
488 if (m_max_size
> 0) {
489 m_occupancy
= m_buf_msgs
/ m_max_size
;
496 MessageBuffer::functionalAccess(Packet
*pkt
, bool is_read
)
498 DPRINTF(RubyQueue
, "functional %s for %#x\n",
499 is_read
? "read" : "write", pkt
->getAddr());
501 uint32_t num_functional_accesses
= 0;
503 // Check the priority heap and write any messages that may
504 // correspond to the address in the packet.
505 for (unsigned int i
= 0; i
< m_prio_heap
.size(); ++i
) {
506 Message
*msg
= m_prio_heap
[i
].get();
507 if (is_read
&& msg
->functionalRead(pkt
))
509 else if (!is_read
&& msg
->functionalWrite(pkt
))
510 num_functional_accesses
++;
513 // Check the stall queue and write any messages that may
514 // correspond to the address in the packet.
515 for (StallMsgMapType::iterator map_iter
= m_stall_msg_map
.begin();
516 map_iter
!= m_stall_msg_map
.end();
519 for (std::list
<MsgPtr
>::iterator it
= (map_iter
->second
).begin();
520 it
!= (map_iter
->second
).end(); ++it
) {
522 Message
*msg
= (*it
).get();
523 if (is_read
&& msg
->functionalRead(pkt
))
525 else if (!is_read
&& msg
->functionalWrite(pkt
))
526 num_functional_accesses
++;
530 return num_functional_accesses
;