2 * Copyright (c) 2019,2020 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include "mem/ruby/network/MessageBuffer.hh"
45 #include "base/cprintf.hh"
46 #include "base/logging.hh"
47 #include "base/random.hh"
48 #include "base/stl_helpers.hh"
49 #include "debug/RubyQueue.hh"
50 #include "mem/ruby/system/RubySystem.hh"
53 using m5::stl_helpers::operator<<;
// Construct a MessageBuffer SimObject from its Python-side Params.
// Capacity, ordering, and randomization policy come from the config;
// per-cycle size bookkeeping starts at zero.
// NOTE(review): this block looks truncated by extraction (opening brace and
// some member initializations are not visible) -- confirm against full source.
55 MessageBuffer::MessageBuffer(const Params
*p
)
56 : SimObject(p
// m_stall_map_size counts messages parked in m_stall_msg_map.
), m_stall_map_size(0),
// buffer_size == 0 is treated as "infinite" capacity (see areNSlotsAvailable).
57 m_max_size(p
->buffer_size
), m_time_last_time_size_checked(0),
58 m_time_last_time_enqueue(0), m_time_last_time_pop(0),
59 m_last_arrival_time(0), m_strict_fifo(p
->ordered
),
60 m_randomization(p
->randomization
),
// When set, enqueue() tolerates delta == 0 (see the assert there).
61 m_allow_zero_latency(p
->allow_zero_latency
)
// Cached-size and start-of-cycle counters begin empty.
65 m_size_last_time_size_checked
= 0;
66 m_size_at_cycle_start
= 0;
67 m_stalled_at_cycle_start
= 0;
68 m_msgs_this_cycle
= 0;
// No messages are stalled at construction time.
71 m_stall_msg_map
.clear();
// No dequeue hook until registerDequeueCallback() installs one.
78 m_dequeue_callback
= nullptr;
// Return the buffer's size as of tick curTime. The size is sampled at most
// once per tick and cached, so repeated calls in the same tick are cheap
// and agree with each other.
82 MessageBuffer::getSize(Tick curTime
)
// Only re-sample the heap when the cached value is from an older tick.
84 if (m_time_last_time_size_checked
!= curTime
) {
85 m_time_last_time_size_checked
= curTime
;
86 m_size_last_time_size_checked
= m_prio_heap
.size();
// Cached snapshot for curTime (stall-map contents are not included here).
89 return m_size_last_time_size_checked
;
// Check whether n more messages can be enqueued at current_time without
// exceeding m_max_size. Sizes are reconstructed so that pops made earlier
// this same cycle do not free space until the next scheduled cycle, while
// enqueues made this cycle do consume space immediately.
// NOTE(review): the returns and else-arms are not visible here (extraction
// truncation) -- confirm the success/failure paths against the full source.
93 MessageBuffer::areNSlotsAvailable(unsigned int n
, Tick current_time
)
96 // fast path when message buffers have infinite size
// m_max_size == 0 encodes an unbounded buffer.
97 if (m_max_size
== 0) {
101 // determine the correct size for the current cycle
102 // pop operations shouldn't effect the network's visible size
103 // until schd cycle, but enqueue operations effect the visible
105 unsigned int current_size
= 0;
106 unsigned int current_stall_size
= 0;
// No pop happened this cycle: live heap/stall-map sizes are authoritative.
108 if (m_time_last_time_pop
< current_time
) {
109 // no pops this cycle - heap and stall queue size is correct
110 current_size
= m_prio_heap
.size();
111 current_stall_size
= m_stall_map_size
;
// A pop did happen this cycle: rebuild the visible size from the
// start-of-cycle snapshot taken by dequeue().
113 if (m_time_last_time_enqueue
< current_time
) {
114 // no enqueues this cycle - m_size_at_cycle_start is correct
115 current_size
= m_size_at_cycle_start
;
117 // both pops and enqueues occured this cycle - add new
118 // enqueued msgs to m_size_at_cycle_start
119 current_size
= m_size_at_cycle_start
+ m_msgs_this_cycle
;
122 // Stall queue size at start is considered
123 current_stall_size
= m_stalled_at_cycle_start
;
126 // now compare the new size with our max size
// Both in-flight and stalled messages count against capacity.
127 if (current_size
+ current_stall_size
+ n
<= m_max_size
) {
// Insufficient room: trace the sizes involved for debugging.
130 DPRINTF(RubyQueue
, "n: %d, current_size: %d, heap size: %d, "
132 n
, current_size
+ current_stall_size
,
133 m_prio_heap
.size(), m_max_size
);
// Return a raw, non-owning pointer to the message at the head of the
// priority heap without removing it.
// NOTE(review): the return statement is not visible here (extraction
// truncation) -- presumably returns msg_ptr; confirm against full source.
140 MessageBuffer::peek() const
142 DPRINTF(RubyQueue
, "Peeking at head of queue.\n");
// front() of the heap is the message with the earliest enqueue time.
143 const Message
* msg_ptr
= m_prio_heap
.front().get();
146 DPRINTF(RubyQueue
, "Message: %s\n", (*msg_ptr
));
// Helper producing a small pseudo-random latency used when message
// randomization is enabled: a base uniform component plus an occasional
// (1-in-8) large delay to shake up message orderings.
150 // FIXME - move me somewhere else
// Uniform component in [0, 3].
155 time
+= random_mt
.random(0, 3); // [0...3]
// Rarely add a much larger delay to perturb orderings more aggressively.
156 if (random_mt
.random(0, 7) == 0) { // 1 in 8 chance
157 time
+= 100 + random_mt
.random(1, 15); // 100 + [1...15]
// Insert a message into the buffer. Computes the message's arrival time
// (current_time + delta, or a randomized delay), stamps the message,
// pushes it onto the priority heap, and schedules the consumer wakeup.
163 MessageBuffer::enqueue(MsgPtr message
, Tick current_time
, Tick delta
)
165 // record current time incase we have a pop that also adjusts my size
166 if (m_time_last_time_enqueue
< current_time
) {
167 m_msgs_this_cycle
= 0; // first msg this cycle
168 m_time_last_time_enqueue
= current_time
;
174 // Calculate the arrival time of the message, that is, the first
175 // cycle the message can be dequeued.
// Zero latency is only legal when explicitly allowed by the config.
176 assert((delta
> 0) || m_allow_zero_latency
);
177 Tick arrival_time
= 0;
179 // random delays are inserted if either RubySystem level randomization flag
180 // is turned on, or the buffer level randomization is set
// Deterministic path: arrival is exactly current_time + delta.
181 if (!RubySystem::getRandomization() && !m_randomization
) {
183 arrival_time
= current_time
+ delta
;
185 // Randomization - ignore delta
// Keep m_last_arrival_time monotonic before adding random slack.
187 if (m_last_arrival_time
< current_time
) {
188 m_last_arrival_time
= current_time
;
190 arrival_time
= m_last_arrival_time
+ random_time();
192 arrival_time
= current_time
+ random_time();
196 // Check the arrival time
197 assert(arrival_time
>= current_time
);
// A message arriving before an earlier one violates FIFO ordering.
199 if (arrival_time
< m_last_arrival_time
) {
200 panic("FIFO ordering violated: %s name: %s current time: %d "
201 "delta: %d arrival_time: %d last arrival_time: %d\n",
202 *this, name(), current_time
, delta
, arrival_time
,
203 m_last_arrival_time
);
207 // If running a cache trace, don't worry about the last arrival checks
208 if (!RubySystem::getWarmupEnabled()) {
209 m_last_arrival_time
= arrival_time
;
212 // compute the delay cycles and set enqueue time
213 Message
* msg_ptr
= message
.get();
214 assert(msg_ptr
!= NULL
);
// A message must not be (re-)enqueued with a timestamp in its own past.
216 assert(current_time
>= msg_ptr
->getLastEnqueueTime() &&
217 "ensure we aren't dequeued early");
// Stamp delay accounting and the tick at which it becomes dequeueable.
219 msg_ptr
->updateDelayedTicks(current_time
);
220 msg_ptr
->setLastEnqueueTime(arrival_time
);
221 msg_ptr
->setMsgCounter(m_msg_counter
);
223 // Insert the message into the priority heap
// greater<MsgPtr> makes this a min-heap on arrival time: earliest at front.
224 m_prio_heap
.push_back(message
);
225 push_heap(m_prio_heap
.begin(), m_prio_heap
.end(), greater
<MsgPtr
>());
226 // Increment the number of messages statistic
// Capacity invariant: heap plus stalled messages never exceed m_max_size
// (unless the buffer is unbounded, m_max_size == 0).
229 assert((m_max_size
== 0) ||
230 ((m_prio_heap
.size() + m_stall_map_size
) <= m_max_size
));
232 DPRINTF(RubyQueue
, "Enqueue arrival_time: %lld, Message: %s\n",
233 arrival_time
, *(message
.get()));
235 // Schedule the wakeup
236 assert(m_consumer
!= NULL
);
237 m_consumer
->scheduleEventAbsolute(arrival_time
);
238 m_consumer
->storeEventInfo(m_vnet_id
);
// Remove the head message from the priority heap. decrement_messages is
// false when the message is merely being moved (e.g. into the stall map by
// stallMessage()) so the buffered-message statistic is not decremented.
// NOTE(review): the return of the delay value is not visible here
// (extraction truncation) -- confirm against full source.
242 MessageBuffer::dequeue(Tick current_time
, bool decrement_messages
)
244 DPRINTF(RubyQueue
, "Popping\n");
// Caller must only pop a message whose arrival time has been reached.
245 assert(isReady(current_time
));
247 // get MsgPtr of the message about to be dequeued
248 MsgPtr message
= m_prio_heap
.front();
250 // get the delay cycles
251 message
->updateDelayedTicks(current_time
);
252 Tick delay
= message
->getDelayedTicks();
// Time this message spent in the buffer, for the stall-time statistic.
254 m_stall_time
= curTick() - message
->getTime();
256 // record previous size and time so the current buffer size isn't
257 // adjusted until schd cycle
// Snapshot sizes on the first pop of the cycle; areNSlotsAvailable()
// uses these so pops don't free visible space until next cycle.
258 if (m_time_last_time_pop
< current_time
) {
259 m_size_at_cycle_start
= m_prio_heap
.size();
260 m_stalled_at_cycle_start
= m_stall_map_size
;
261 m_time_last_time_pop
= current_time
;
// Standard min-heap pop: move head to back, then drop it.
264 pop_heap(m_prio_heap
.begin(), m_prio_heap
.end(), greater
<MsgPtr
>());
265 m_prio_heap
.pop_back();
266 if (decrement_messages
) {
267 // If the message will be removed from the queue, decrement the
268 // number of message in the queue.
272 // if a dequeue callback was requested, call it now
273 if (m_dequeue_callback
) {
274 m_dequeue_callback();
281 MessageBuffer::registerDequeueCallback(std::function
<void()> callback
)
283 m_dequeue_callback
= callback
;
287 MessageBuffer::unregisterDequeueCallback()
289 m_dequeue_callback
= nullptr;
// Reset the buffer's per-cycle bookkeeping to its initial (empty) state.
// NOTE(review): this block appears truncated by extraction -- clearing of
// the priority heap / message counter is not visible; confirm against the
// full source.
293 MessageBuffer::clear()
298 m_time_last_time_enqueue
= 0;
299 m_time_last_time_pop
= 0;
300 m_size_at_cycle_start
= 0;
301 m_stalled_at_cycle_start
= 0;
302 m_msgs_this_cycle
= 0;
// Re-schedule the head message to be retried recycle_latency ticks from
// now: pop it off the heap, bump its enqueue time, push it back, and wake
// the consumer at the new time.
306 MessageBuffer::recycle(Tick current_time
, Tick recycle_latency
)
308 DPRINTF(RubyQueue
, "Recycling.\n");
309 assert(isReady(current_time
));
// Keep a handle on the head before the heap pop shuffles it to the back.
310 MsgPtr node
= m_prio_heap
.front();
311 pop_heap(m_prio_heap
.begin(), m_prio_heap
.end(), greater
<MsgPtr
>());
// The message becomes dequeueable again only at future_time.
313 Tick future_time
= current_time
+ recycle_latency
;
314 node
->setLastEnqueueTime(future_time
);
// pop_heap left the old head at back(); overwrite it with the restamped
// message and restore the heap property.
316 m_prio_heap
.back() = node
;
317 push_heap(m_prio_heap
.begin(), m_prio_heap
.end(), greater
<MsgPtr
>());
318 m_consumer
->scheduleEventAbsolute(future_time
);
// Drain a list of previously stalled messages back into the priority heap,
// scheduling the consumer at schdTick so they are observed before any
// younger messages arriving this cycle.
// NOTE(review): the heap comparator argument and the list pop are not
// visible here (extraction truncation) -- confirm against full source.
322 MessageBuffer::reanalyzeList(list
<MsgPtr
> <
, Tick schdTick
)
324 while (!lt
.empty()) {
325 MsgPtr m
= lt
.front();
// Stalled messages must already have arrived by the requeue tick.
326 assert(m
->getLastEnqueueTime() <= schdTick
);
328 m_prio_heap
.push_back(m
);
329 push_heap(m_prio_heap
.begin(), m_prio_heap
.end(),
// Wake the consumer so the requeued message is processed this cycle.
332 m_consumer
->scheduleEventAbsolute(schdTick
);
334 DPRINTF(RubyQueue
, "Requeue arrival_time: %lld, Message: %s\n",
335 schdTick
, *(m
.get()));
// Move all messages stalled on addr back onto the priority heap so the
// controller re-examines them (called after the address changes state).
342 MessageBuffer::reanalyzeMessages(Addr addr
, Tick current_time
)
344 DPRINTF(RubyQueue
, "ReanalyzeMessages %#x\n", addr
);
// Caller contract: there must actually be stalled messages for addr.
345 assert(m_stall_msg_map
.count(addr
) > 0);
348 // Put all stalled messages associated with this address back on the
349 // prio heap. The reanalyzeList call will make sure the consumer is
350 // scheduled for the current cycle so that the previously stalled messages
351 // will be observed before any younger messages that may arrive this cycle
// Account for the messages leaving the stall map before requeueing them.
353 m_stall_map_size
-= m_stall_msg_map
[addr
].size();
354 assert(m_stall_map_size
>= 0);
355 reanalyzeList(m_stall_msg_map
[addr
], current_time
);
// The per-address stall list is now empty; drop its map entry.
356 m_stall_msg_map
.erase(addr
);
// Move every stalled message (for all addresses) back onto the priority
// heap and clear the stall map.
360 MessageBuffer::reanalyzeAllMessages(Tick current_time
)
362 DPRINTF(RubyQueue
, "ReanalyzeAllMessages\n");
365 // Put all stalled messages associated with this address back on the
366 // prio heap. The reanalyzeList call will make sure the consumer is
367 // scheduled for the current cycle so that the previously stalled messages
368 // will be observed before any younger messages that may arrive this cycle.
370 for (StallMsgMapType::iterator map_iter
= m_stall_msg_map
.begin();
371 map_iter
!= m_stall_msg_map
.end(); ++map_iter
) {
// Keep the aggregate stall count consistent as each list is drained.
372 m_stall_map_size
-= map_iter
->second
.size();
373 assert(m_stall_map_size
>= 0);
374 reanalyzeList(map_iter
->second
, current_time
);
// All per-address lists have been drained; remove the empty entries.
376 m_stall_msg_map
.clear();
// Move the head message from the priority heap into the per-address stall
// map. No future event is scheduled; the controller must later call
// reanalyzeMessages()/reanalyzeAllMessages() to requeue it.
380 MessageBuffer::stallMessage(Addr addr
, Tick current_time
)
382 DPRINTF(RubyQueue
, "Stalling due to %#x\n", addr
);
383 assert(isReady(current_time
));
// addr must be a cache-line-aligned address (offset bits zero).
384 assert(getOffset(addr
) == 0);
// Hold a reference before dequeue() pops it off the heap.
385 MsgPtr message
= m_prio_heap
.front();
387 // Since the message will just be moved to stall map, indicate that the
388 // buffer should not decrement the m_buf_msgs statistic
389 dequeue(current_time
, false);
392 // Note: no event is scheduled to analyze the map at a later time.
393 // Instead the controller is responsible to call reanalyzeMessages when
394 // these addresses change state.
// Append to the stall list for this address (creates the entry if absent).
396 (m_stall_msg_map
[addr
]).push_back(message
);
402 MessageBuffer::hasStalledMsg(Addr addr
) const
404 return (m_stall_msg_map
.count(addr
) != 0);
408 MessageBuffer::deferEnqueueingMessage(Addr addr
, MsgPtr message
)
410 DPRINTF(RubyQueue
, "Deferring enqueueing message: %s, Address %#x\n",
411 *(message
.get()), addr
);
412 (m_deferred_msg_map
[addr
]).push_back(message
);
416 MessageBuffer::enqueueDeferredMessages(Addr addr
, Tick curTime
, Tick delay
)
418 assert(!isDeferredMsgMapEmpty(addr
));
419 std::vector
<MsgPtr
>& msg_vec
= m_deferred_msg_map
[addr
];
420 assert(msg_vec
.size() > 0);
422 // enqueue all deferred messages associated with this address
423 for (MsgPtr m
: msg_vec
) {
424 enqueue(m
, curTime
, delay
);
428 m_deferred_msg_map
.erase(addr
);
432 MessageBuffer::isDeferredMsgMapEmpty(Addr addr
) const
434 return m_deferred_msg_map
.count(addr
) == 0;
438 MessageBuffer::print(ostream
& out
) const
440 ccprintf(out
, "[MessageBuffer: ");
441 if (m_consumer
!= NULL
) {
442 ccprintf(out
, " consumer-yes ");
445 vector
<MsgPtr
> copy(m_prio_heap
);
446 sort_heap(copy
.begin(), copy
.end(), greater
<MsgPtr
>());
447 ccprintf(out
, "%s] %s", copy
, name());
451 MessageBuffer::isReady(Tick current_time
) const
453 return ((m_prio_heap
.size() > 0) &&
454 (m_prio_heap
.front()->getLastEnqueueTime() <= current_time
));
// Register this buffer's statistics with the stats framework. Each stat is
// tagged nozero so unused buffers do not clutter the output.
// NOTE(review): the stat variable names preceding each .name() chain are not
// visible here (extraction truncation) -- confirm against full source.
458 MessageBuffer::regStats()
// Failed areNSlotsAvailable() checks.
461 .name(name() + ".not_avail_count")
462 .desc("Number of times this buffer did not have N slots available")
463 .flags(Stats::nozero
);
// Average buffered-message count over time.
466 .name(name() + ".avg_buf_msgs")
467 .desc("Average number of messages in buffer")
468 .flags(Stats::nozero
);
469 .name(name() + ".num_msg_stalls")
472 .desc("Number of times messages were stalled")
473 .flags(Stats::nozero
);
// Occupancy as a fraction of capacity; only meaningful for bounded buffers.
476 .name(name() + ".avg_buf_occ")
477 .desc("Average occupancy of buffer capacity")
478 .flags(Stats::nozero
);
481 .name(name() + ".avg_stall_time")
482 .desc("Average number of cycles messages are stalled in this MB")
483 .flags(Stats::nozero
);
// Occupancy formula only defined when the buffer has finite capacity.
485 if (m_max_size
> 0) {
486 m_occupancy
= m_buf_msgs
/ m_max_size
;
// Perform a functional (debug, non-timing) read or write against every
// message in the priority heap and the stall map, and report how many
// messages were successfully accessed.
// NOTE(review): parts of this block (braces, any early-exit on a successful
// read) are not visible here (extraction truncation) -- confirm against
// full source.
493 MessageBuffer::functionalAccess(Packet
*pkt
, bool is_read
)
495 DPRINTF(RubyQueue
, "functional %s for %#x\n",
496 is_read
? "read" : "write", pkt
->getAddr());
498 uint32_t num_functional_accesses
= 0;
500 // Check the priority heap and write any messages that may
501 // correspond to the address in the packet.
// Heap order doesn't matter for functional access; scan linearly.
502 for (unsigned int i
= 0; i
< m_prio_heap
.size(); ++i
) {
503 Message
*msg
= m_prio_heap
[i
].get();
// Each message type decides itself whether the packet address matches.
504 if (is_read
&& msg
->functionalRead(pkt
))
506 else if (!is_read
&& msg
->functionalWrite(pkt
))
507 num_functional_accesses
++;
510 // Check the stall queue and write any messages that may
511 // correspond to the address in the packet.
512 for (StallMsgMapType::iterator map_iter
= m_stall_msg_map
.begin();
513 map_iter
!= m_stall_msg_map
.end();
// Inner loop: every message stalled on this address.
516 for (std::list
<MsgPtr
>::iterator it
= (map_iter
->second
).begin();
517 it
!= (map_iter
->second
).end(); ++it
) {
519 Message
*msg
= (*it
).get();
520 if (is_read
&& msg
->functionalRead(pkt
))
522 else if (!is_read
&& msg
->functionalWrite(pkt
))
523 num_functional_accesses
++;
// Total count of messages touched by this functional access.
527 return num_functional_accesses
;
// Factory hook used by the Python configuration system: instantiate the
// C++ MessageBuffer from this parameter object.
531 MessageBufferParams::create()
533 return new MessageBuffer(this);