X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmem%2Fruby%2Fnetwork%2Fsimple%2FThrottle.cc;h=91bad217bbfb32662f8bd70e5330bfe8a987145d;hb=7a0d5aafe4b845a2d1cff6210d7c6ee66e8aba61;hp=905a7aa28b9138f67bc993bca188cbbc5a495624;hpb=eddac53ff60c579eff28134bde84783fe36d6214;p=gem5.git

diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index 905a7aa28..91bad217b 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -28,19 +28,17 @@
 
 #include <cassert>
 
+#include "base/cast.hh"
 #include "base/cprintf.hh"
 #include "debug/RubyNetwork.hh"
-#include "mem/protocol/Protocol.hh"
-#include "mem/ruby/buffers/MessageBuffer.hh"
 #include "mem/ruby/network/simple/Throttle.hh"
+#include "mem/ruby/network/MessageBuffer.hh"
 #include "mem/ruby/network/Network.hh"
 #include "mem/ruby/slicc_interface/NetworkMessage.hh"
 #include "mem/ruby/system/System.hh"
 
 using namespace std;
 
-const int HIGH_RANGE = 256;
-const int ADJUST_INTERVAL = 50000;
 const int MESSAGE_SIZE_MULTIPLIER = 1000;
 //const int BROADCAST_SCALING = 4; // Have a 16p system act like a 64p systems
 const int BROADCAST_SCALING = 1;
@@ -48,74 +46,115 @@ const int PRIORITY_SWITCH_LIMIT = 128;
 
 static int network_message_to_size(NetworkMessage* net_msg_ptr);
 
-Throttle::Throttle(int sID, NodeID node, int link_latency,
-                   int link_bandwidth_multiplier)
+Throttle::Throttle(int sID, NodeID node, Cycles link_latency,
+                   int link_bandwidth_multiplier, int endpoint_bandwidth,
+                   ClockedObject *em)
+    : Consumer(em)
 {
-    init(node, link_latency, link_bandwidth_multiplier);
+    init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
     m_sID = sID;
 }
 
-Throttle::Throttle(NodeID node, int link_latency,
-                   int link_bandwidth_multiplier)
+Throttle::Throttle(NodeID node, Cycles link_latency,
+                   int link_bandwidth_multiplier, int endpoint_bandwidth,
+                   ClockedObject *em)
+    : Consumer(em)
 {
-    init(node, link_latency, link_bandwidth_multiplier);
+    init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
     m_sID = 0;
 }
 
 void
-Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier)
+Throttle::init(NodeID node, Cycles link_latency,
+               int link_bandwidth_multiplier, int endpoint_bandwidth)
 {
     m_node = node;
-    m_vnets = 0;
-
     assert(link_bandwidth_multiplier > 0);
     m_link_bandwidth_multiplier = link_bandwidth_multiplier;
+
     m_link_latency = link_latency;
+    m_endpoint_bandwidth = endpoint_bandwidth;
 
     m_wakeups_wo_switch = 0;
-    clearStats();
+    m_link_utilization_proxy = 0;
 }
 
 void
-Throttle::clear()
+Throttle::addLinks(const map<int, MessageBuffer*>& in_vec,
+                   const map<int, MessageBuffer*>& out_vec)
 {
-    for (int counter = 0; counter < m_vnets; counter++) {
-        m_in[counter]->clear();
-        m_out[counter]->clear();
+    assert(in_vec.size() == out_vec.size());
+
+    for (auto& it : in_vec) {
+        int vnet = it.first;
+
+        auto jt = out_vec.find(vnet);
+        assert(jt != out_vec.end());
+
+        MessageBuffer *in_ptr = it.second;
+        MessageBuffer *out_ptr = (*jt).second;
+
+        m_in[vnet] = in_ptr;
+        m_out[vnet] = out_ptr;
+        m_units_remaining[vnet] = 0;
+
+        // Set consumer and description
+        in_ptr->setConsumer(this);
+        string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
+            to_string(m_node) + "]";
+        in_ptr->setDescription(desc);
     }
 }
 
 void
-Throttle::addLinks(const std::vector<MessageBuffer*>& in_vec,
-                   const std::vector<MessageBuffer*>& out_vec)
+Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
+                      MessageBuffer *in, MessageBuffer *out)
 {
-    assert(in_vec.size() == out_vec.size());
-    for (int i=0; i<in_vec.size(); i++) {
-        addVirtualNetwork(in_vec[i], out_vec[i]);
+    assert(out != NULL);
+    assert(in != NULL);
+    assert(m_units_remaining[vnet] >= 0);
+
+    while (bw_remaining > 0 &&
+           (in->isReady() || m_units_remaining[vnet] > 0) &&
+           out->areNSlotsAvailable(1)) {
+
+        // See if we are done transferring the previous message on
+        // this virtual network
+        if (m_units_remaining[vnet] == 0 && in->isReady()) {
+            // Find the size of the message we are moving
+            MsgPtr msg_ptr = in->peekMsgPtr();
+            NetworkMessage* net_msg_ptr =
+                safe_cast<NetworkMessage*>(msg_ptr.get());
+            m_units_remaining[vnet] +=
+                network_message_to_size(net_msg_ptr);
+
+            DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
+                    "enqueueing net msg %d time: %lld.\n",
+                    m_node, getLinkBandwidth(), m_units_remaining[vnet],
+                    g_system_ptr->curCycle());
+
+            // Move the message
+            in->dequeue();
+            out->enqueue(msg_ptr, m_link_latency);
+
+            // Count the message
+            m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
+            DPRINTF(RubyNetwork, "%s\n", *out);
+        }
+
+        // Calculate the amount of bandwidth we spent on this message
+        int diff = m_units_remaining[vnet] - bw_remaining;
+        m_units_remaining[vnet] = max(0, diff);
+        bw_remaining = max(0, -diff);
     }
-}
 
-void
-Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
-{
-    m_units_remaining.push_back(0);
-    m_in.push_back(in_ptr);
-    m_out.push_back(out_ptr);
-
-    // Set consumer and description
-    m_in[m_vnets]->setConsumer(this);
-    string desc = "[Queue to Throttle " + NodeIDToString(m_sID) + " " +
-        NodeIDToString(m_node) + "]";
-    m_in[m_vnets]->setDescription(desc);
-    m_vnets++;
+    if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
+        !out->areNSlotsAvailable(1)) {
+        DPRINTF(RubyNetwork, "vnet: %d", vnet);
+
+        // schedule me to wakeup again because I'm waiting for my
+        // output queue to become available
+        schedule_wakeup = true;
+    }
 }
 
 void
@@ -125,71 +164,30 @@ Throttle::wakeup()
     assert(getLinkBandwidth() > 0);
     int bw_remaining = getLinkBandwidth();
 
-    // Give the highest numbered link priority most of the time
     m_wakeups_wo_switch++;
-    int highest_prio_vnet = m_vnets-1;
-    int lowest_prio_vnet = 0;
-    int counter = 1;
     bool schedule_wakeup = false;
 
+    // variable for deciding the direction in which to iterate
+    bool iteration_direction = false;
+
+    // invert priorities to avoid starvation seen in the component network
    if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
         m_wakeups_wo_switch = 0;
-        highest_prio_vnet = 0;
-        lowest_prio_vnet = m_vnets-1;
-        counter = -1;
+        iteration_direction = true;
     }
 
-    for (int vnet = highest_prio_vnet;
-         (vnet * counter) >= (counter * lowest_prio_vnet);
-         vnet -= counter) {
-
-        assert(m_out[vnet] != NULL);
-        assert(m_in[vnet] != NULL);
-        assert(m_units_remaining[vnet] >= 0);
-
-        while (bw_remaining > 0 &&
-               (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
-               m_out[vnet]->areNSlotsAvailable(1)) {
-
-            // See if we are done transferring the previous message on
-            // this virtual network
-            if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
-                // Find the size of the message we are moving
-                MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
-                NetworkMessage* net_msg_ptr =
-                    safe_cast<NetworkMessage*>(msg_ptr.get());
-                m_units_remaining[vnet] +=
-                    network_message_to_size(net_msg_ptr);
-
-                DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
-                        "enqueueing net msg %d time: %lld.\n",
-                        m_node, getLinkBandwidth(), m_units_remaining[vnet],
-                        g_eventQueue_ptr->getTime());
-
-                // Move the message
-                m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
-                m_in[vnet]->pop();
-
-                // Count the message
-                m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;
-
-                DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]);
-            }
-
-            // Calculate the amount of bandwidth we spent on this message
-            int diff = m_units_remaining[vnet] - bw_remaining;
-            m_units_remaining[vnet] = max(0, diff);
-            bw_remaining = max(0, -diff);
+    if (iteration_direction) {
+        for (auto& it : m_in) {
+            int vnet = it.first;
+            operateVnet(vnet, bw_remaining, schedule_wakeup,
+                        it.second, m_out[vnet]);
         }
-
-        if (bw_remaining > 0 &&
-            (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
-            !m_out[vnet]->areNSlotsAvailable(1)) {
-            DPRINTF(RubyNetwork, "vnet: %d", vnet);
-            // schedule me to wakeup again because I'm waiting for my
-            // output queue to become available
-            schedule_wakeup = true;
+    } else {
+        for (auto it = m_in.rbegin(); it != m_in.rend(); ++it) {
+            int vnet = (*it).first;
+            operateVnet(vnet, bw_remaining, schedule_wakeup,
+                        (*it).second, m_out[vnet]);
         }
     }
 
@@ -201,7 +199,7 @@ Throttle::wakeup()
     double ratio = 1.0 - (double(bw_remaining) / double(getLinkBandwidth()));
 
     // If ratio = 0, we used no bandwidth, if ratio = 1, we used all
-    linkUtilized(ratio);
+    m_link_utilization_proxy += ratio;
 
     if (bw_remaining > 0 && !schedule_wakeup) {
         // We have extra bandwidth and our output buffer was
@@ -213,39 +211,46 @@
 
         // We are out of bandwidth for this cycle, so wakeup next
         // cycle and continue
-        g_eventQueue_ptr->scheduleEvent(this, 1);
+        scheduleEvent(Cycles(1));
     }
 }
 
 void
-Throttle::printStats(ostream& out) const
+Throttle::regStats(string parent)
 {
-    out << "utilized_percent: " << getUtilization() << endl;
+    m_link_utilization
+        .name(parent + csprintf(".throttle%i", m_node) + ".link_utilization");
+
+    for (MessageSizeType type = MessageSizeType_FIRST;
+         type < MessageSizeType_NUM; ++type) {
+        m_msg_counts[(unsigned int)type]
+            .init(Network::getNumberOfVirtualNetworks())
+            .name(parent + csprintf(".throttle%i", m_node) + ".msg_count." +
+                MessageSizeType_to_string(type))
+            .flags(Stats::nozero)
+            ;
+        m_msg_bytes[(unsigned int) type]
+            .name(parent + csprintf(".throttle%i", m_node) + ".msg_bytes." +
+                MessageSizeType_to_string(type))
+            .flags(Stats::nozero)
+            ;
+
+        m_msg_bytes[(unsigned int) type] = m_msg_counts[type] * Stats::constant(
+            Network::MessageSizeType_to_int(type));
+    }
 }
 
 void
 Throttle::clearStats()
 {
-    m_ruby_start = g_eventQueue_ptr->getTime();
-    m_links_utilized = 0.0;
-
-    for (int i = 0; i < m_message_counters.size(); i++) {
-        for (int j = 0; j < m_message_counters[i].size(); j++) {
-            m_message_counters[i][j] = 0;
-        }
-    }
+    m_link_utilization_proxy = 0;
 }
 
 void
-Throttle::printConfig(ostream& out) const
-{
-}
-
-double
-Throttle::getUtilization() const
+Throttle::collateStats()
 {
-    return 100.0 * double(m_links_utilized) /
-        double(g_eventQueue_ptr->getTime()-m_ruby_start);
+    m_link_utilization = 100.0 * m_link_utilization_proxy
+        / (double(g_system_ptr->curCycle() - g_ruby_start));
 }
 
 void
@@ -259,8 +264,7 @@ network_message_to_size(NetworkMessage* net_msg_ptr)
 {
     assert(net_msg_ptr != NULL);
 
-    int size = RubySystem::getNetwork()->
-        MessageSizeType_to_int(net_msg_ptr->getMessageSize());
+    int size = Network::MessageSizeType_to_int(net_msg_ptr->getMessageSize());
     size *= MESSAGE_SIZE_MULTIPLIER;
 
     // Artificially increase the size of broadcast messages
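
The core of the change above is that the per-virtual-network bandwidth loop moves out of Throttle::wakeup() into Throttle::operateVnet(), the in/out MessageBuffers become maps keyed by vnet id, and the hand-rolled counters become gem5 statistics (regStats/collateStats). The standalone sketch below illustrates only the throttling policy itself; SimpleThrottle, Queue, and the bandwidth and message sizes in main() are hypothetical stand-ins, and output-queue space (areNSlotsAvailable), link latency and the statistics are deliberately omitted, so treat it as an illustration of the algorithm rather than gem5 code.

// Standalone sketch (not gem5 code): SimpleThrottle, Queue and the numbers
// in main() are made-up stand-ins for Throttle, MessageBuffer and real
// message sizes; they only illustrate the policy in the diff above.

#include <algorithm>
#include <cstdio>
#include <deque>
#include <map>

struct Queue {                        // stand-in for a gem5 MessageBuffer
    std::deque<int> msgs;             // each entry is a message size in units
    bool isReady() const { return !msgs.empty(); }
};

class SimpleThrottle {
  public:
    explicit SimpleThrottle(int link_bandwidth)
        : m_link_bandwidth(link_bandwidth), m_wakeups_wo_switch(0) {}

    // mirrors addLinks(): one in/out pair per virtual network, keyed by id
    void addLink(int vnet, Queue *in, Queue *out) {
        m_in[vnet] = in;
        m_out[vnet] = out;
        m_units_remaining[vnet] = 0;
    }

    // Mirrors the shape of operateVnet(): spend this wakeup's remaining
    // link bandwidth moving messages from 'in' to 'out'.
    void operateVnet(int vnet, int &bw_remaining, Queue *in, Queue *out) {
        while (bw_remaining > 0 &&
               (in->isReady() || m_units_remaining[vnet] > 0)) {
            if (m_units_remaining[vnet] == 0 && in->isReady()) {
                // Start a new message: charge its full size to this vnet.
                int size = in->msgs.front();
                in->msgs.pop_front();
                out->msgs.push_back(size);
                m_units_remaining[vnet] += size;
            }
            // Same bandwidth arithmetic as the diff: whatever is left of
            // the message carries over to the next wakeup.
            int diff = m_units_remaining[vnet] - bw_remaining;
            m_units_remaining[vnet] = std::max(0, diff);
            bw_remaining = std::max(0, -diff);
        }
    }

    // Mirrors wakeup(): normally serve the highest-numbered vnet first,
    // flipping the direction every PRIORITY_SWITCH_LIMIT wakeups so the
    // low-numbered vnets are not starved.
    void wakeup() {
        static const int PRIORITY_SWITCH_LIMIT = 128;
        int bw_remaining = m_link_bandwidth;

        bool forward = false;
        if (++m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
            m_wakeups_wo_switch = 0;
            forward = true;
        }

        if (forward) {
            for (auto &it : m_in)
                operateVnet(it.first, bw_remaining, it.second, m_out[it.first]);
        } else {
            for (auto it = m_in.rbegin(); it != m_in.rend(); ++it)
                operateVnet(it->first, bw_remaining, it->second,
                            m_out[it->first]);
        }
    }

  private:
    int m_link_bandwidth;             // units the link can move per wakeup
    int m_wakeups_wo_switch;
    std::map<int, Queue*> m_in, m_out;
    std::map<int, int> m_units_remaining;
};

int main() {
    Queue in0, out0, in1, out1;
    in0.msgs = {8, 8};                // two small control messages on vnet 0
    in1.msgs = {72};                  // one large data message on vnet 1
    SimpleThrottle link(16);          // 16 units of bandwidth per wakeup
    link.addLink(0, &in0, &out0);
    link.addLink(1, &in1, &out1);
    for (int cycle = 0; cycle < 6; ++cycle)
        link.wakeup();
    std::printf("transferred: vnet0=%zu vnet1=%zu\n",
                out0.msgs.size(), out1.msgs.size());
    return 0;
}

The direction flip corresponds to the iteration_direction flag in the diff: most wakeups walk the vnets from highest to lowest, and every PRIORITY_SWITCH_LIMIT wakeups the order is inverted, matching the "invert priorities to avoid starvation" comment above. The real operateVnet() additionally checks out->areNSlotsAvailable(1), enqueues with m_link_latency, and updates m_msg_counts/m_msg_bytes.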