/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <cassert>

#include "base/cast.hh"
#include "base/cprintf.hh"
#include "debug/RubyNetwork.hh"
#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/system/System.hh"
// Scale factor turning message-size units into the finer-grained units
// used for per-cycle bandwidth accounting below.
const int MESSAGE_SIZE_MULTIPLIER = 1000;
//const int BROADCAST_SCALING = 4; // Have a 16p system act like a 64p systems
const int BROADCAST_SCALING = 1;
// After this many consecutive wakeups, vnet service order is inverted
// once to avoid starving the low-priority end of the map.
const int PRIORITY_SWITCH_LIMIT = 128;
47 static int network_message_to_size(NetworkMessage
* net_msg_ptr
);
49 Throttle::Throttle(int sID
, NodeID node
, Cycles link_latency
,
50 int link_bandwidth_multiplier
, int endpoint_bandwidth
,
54 init(node
, link_latency
, link_bandwidth_multiplier
, endpoint_bandwidth
);
58 Throttle::Throttle(NodeID node
, Cycles link_latency
,
59 int link_bandwidth_multiplier
, int endpoint_bandwidth
,
63 init(node
, link_latency
, link_bandwidth_multiplier
, endpoint_bandwidth
);
68 Throttle::init(NodeID node
, Cycles link_latency
,
69 int link_bandwidth_multiplier
, int endpoint_bandwidth
)
72 assert(link_bandwidth_multiplier
> 0);
73 m_link_bandwidth_multiplier
= link_bandwidth_multiplier
;
75 m_link_latency
= link_latency
;
76 m_endpoint_bandwidth
= endpoint_bandwidth
;
78 m_wakeups_wo_switch
= 0;
79 m_link_utilization_proxy
= 0;
83 Throttle::addLinks(const map
<int, MessageBuffer
*>& in_vec
,
84 const map
<int, MessageBuffer
*>& out_vec
)
86 assert(in_vec
.size() == out_vec
.size());
88 for (auto& it
: in_vec
) {
91 auto jt
= out_vec
.find(vnet
);
92 assert(jt
!= out_vec
.end());
94 MessageBuffer
*in_ptr
= it
.second
;
95 MessageBuffer
*out_ptr
= (*jt
).second
;
98 m_out
[vnet
] = out_ptr
;
99 m_units_remaining
[vnet
] = 0;
101 // Set consumer and description
102 in_ptr
->setConsumer(this);
103 string desc
= "[Queue to Throttle " + to_string(m_sID
) + " " +
104 to_string(m_node
) + "]";
105 in_ptr
->setDescription(desc
);
110 Throttle::operateVnet(int vnet
, int &bw_remaining
, bool &schedule_wakeup
,
111 MessageBuffer
*in
, MessageBuffer
*out
)
115 assert(m_units_remaining
[vnet
] >= 0);
117 while (bw_remaining
> 0 && (in
->isReady() || m_units_remaining
[vnet
] > 0) &&
118 out
->areNSlotsAvailable(1)) {
120 // See if we are done transferring the previous message on
121 // this virtual network
122 if (m_units_remaining
[vnet
] == 0 && in
->isReady()) {
123 // Find the size of the message we are moving
124 MsgPtr msg_ptr
= in
->peekMsgPtr();
125 NetworkMessage
* net_msg_ptr
=
126 safe_cast
<NetworkMessage
*>(msg_ptr
.get());
127 m_units_remaining
[vnet
] +=
128 network_message_to_size(net_msg_ptr
);
130 DPRINTF(RubyNetwork
, "throttle: %d my bw %d bw spent "
131 "enqueueing net msg %d time: %lld.\n",
132 m_node
, getLinkBandwidth(), m_units_remaining
[vnet
],
133 g_system_ptr
->curCycle());
137 out
->enqueue(msg_ptr
, m_link_latency
);
140 m_msg_counts
[net_msg_ptr
->getMessageSize()][vnet
]++;
141 DPRINTF(RubyNetwork
, "%s\n", *out
);
144 // Calculate the amount of bandwidth we spent on this message
145 int diff
= m_units_remaining
[vnet
] - bw_remaining
;
146 m_units_remaining
[vnet
] = max(0, diff
);
147 bw_remaining
= max(0, -diff
);
150 if (bw_remaining
> 0 && (in
->isReady() || m_units_remaining
[vnet
] > 0) &&
151 !out
->areNSlotsAvailable(1)) {
152 DPRINTF(RubyNetwork
, "vnet: %d", vnet
);
154 // schedule me to wakeup again because I'm waiting for my
155 // output queue to become available
156 schedule_wakeup
= true;
163 // Limits the number of message sent to a limited number of bytes/cycle.
164 assert(getLinkBandwidth() > 0);
165 int bw_remaining
= getLinkBandwidth();
167 m_wakeups_wo_switch
++;
168 bool schedule_wakeup
= false;
170 // variable for deciding the direction in which to iterate
171 bool iteration_direction
= false;
174 // invert priorities to avoid starvation seen in the component network
175 if (m_wakeups_wo_switch
> PRIORITY_SWITCH_LIMIT
) {
176 m_wakeups_wo_switch
= 0;
177 iteration_direction
= true;
180 if (iteration_direction
) {
181 for (auto& it
: m_in
) {
183 operateVnet(vnet
, bw_remaining
, schedule_wakeup
,
184 it
.second
, m_out
[vnet
]);
187 for (auto it
= m_in
.rbegin(); it
!= m_in
.rend(); ++it
) {
188 int vnet
= (*it
).first
;
189 operateVnet(vnet
, bw_remaining
, schedule_wakeup
,
190 (*it
).second
, m_out
[vnet
]);
194 // We should only wake up when we use the bandwidth
195 // This is only mostly true
196 // assert(bw_remaining != getLinkBandwidth());
198 // Record that we used some or all of the link bandwidth this cycle
199 double ratio
= 1.0 - (double(bw_remaining
) / double(getLinkBandwidth()));
201 // If ratio = 0, we used no bandwidth, if ratio = 1, we used all
202 m_link_utilization_proxy
+= ratio
;
204 if (bw_remaining
> 0 && !schedule_wakeup
) {
205 // We have extra bandwidth and our output buffer was
206 // available, so we must not have anything else to do until
207 // another message arrives.
208 DPRINTF(RubyNetwork
, "%s not scheduled again\n", *this);
210 DPRINTF(RubyNetwork
, "%s scheduled again\n", *this);
212 // We are out of bandwidth for this cycle, so wakeup next
213 // cycle and continue
214 scheduleEvent(Cycles(1));
219 Throttle::regStats(string parent
)
222 .name(parent
+ csprintf(".throttle%i", m_node
) + ".link_utilization");
224 for (MessageSizeType type
= MessageSizeType_FIRST
;
225 type
< MessageSizeType_NUM
; ++type
) {
226 m_msg_counts
[(unsigned int)type
]
227 .init(Network::getNumberOfVirtualNetworks())
228 .name(parent
+ csprintf(".throttle%i", m_node
) + ".msg_count." +
229 MessageSizeType_to_string(type
))
230 .flags(Stats::nozero
)
232 m_msg_bytes
[(unsigned int) type
]
233 .name(parent
+ csprintf(".throttle%i", m_node
) + ".msg_bytes." +
234 MessageSizeType_to_string(type
))
235 .flags(Stats::nozero
)
238 m_msg_bytes
[(unsigned int) type
] = m_msg_counts
[type
] * Stats::constant(
239 Network::MessageSizeType_to_int(type
));
244 Throttle::clearStats()
246 m_link_utilization_proxy
= 0;
250 Throttle::collateStats()
252 m_link_utilization
= 100.0 * m_link_utilization_proxy
253 / (double(g_system_ptr
->curCycle() - g_ruby_start
));
257 Throttle::print(ostream
& out
) const
259 ccprintf(out
, "[%i bw: %i]", m_node
, getLinkBandwidth());
263 network_message_to_size(NetworkMessage
* net_msg_ptr
)
265 assert(net_msg_ptr
!= NULL
);
267 int size
= Network::MessageSizeType_to_int(net_msg_ptr
->getMessageSize());
268 size
*= MESSAGE_SIZE_MULTIPLIER
;
270 // Artificially increase the size of broadcast messages
271 if (BROADCAST_SCALING
> 1 && net_msg_ptr
->getDestination().isBroadcast())
272 size
*= BROADCAST_SCALING
;