/*
 * Copyright (c) 2008 Princeton University
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Niket Agarwal
 */
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/BasicLink.hh"
#include "mem/ruby/network/Topology.hh"
#include "mem/ruby/network/garnet/BaseGarnetNetwork.hh"
38 BaseGarnetNetwork::BaseGarnetNetwork(const Params
*p
)
41 m_ni_flit_size
= p
->ni_flit_size
;
42 m_vcs_per_vnet
= p
->vcs_per_vnet
;
43 m_enable_fault_model
= p
->enable_fault_model
;
44 if (m_enable_fault_model
)
45 fault_model
= p
->fault_model
;
47 // Currently Garnet only supports uniform bandwidth for all
48 // links and network interfaces.
49 for (std::vector
<BasicExtLink
*>::const_iterator i
= p
->ext_links
.begin();
50 i
!= p
->ext_links
.end(); ++i
) {
51 BasicExtLink
* ext_link
= (*i
);
52 if (ext_link
->params()->bandwidth_factor
!= m_ni_flit_size
) {
53 fatal("Garnet only supports uniform bw across all links and NIs\n");
56 for (std::vector
<BasicIntLink
*>::const_iterator i
= p
->int_links
.begin();
57 i
!= p
->int_links
.end(); ++i
) {
58 BasicIntLink
* int_link
= (*i
);
59 if (int_link
->params()->bandwidth_factor
!= m_ni_flit_size
) {
60 fatal("Garnet only supports uniform bw across all links and NIs\n");
64 // Allocate to and from queues
66 // Queues that are getting messages from protocol
67 m_toNetQueues
.resize(m_nodes
);
69 // Queues that are feeding the protocol
70 m_fromNetQueues
.resize(m_nodes
);
72 m_in_use
.resize(m_virtual_networks
);
73 m_ordered
.resize(m_virtual_networks
);
74 for (int i
= 0; i
< m_virtual_networks
; i
++) {
79 for (int node
= 0; node
< m_nodes
; node
++) {
80 // Setting number of virtual message buffers per Network Queue
81 m_toNetQueues
[node
].resize(m_virtual_networks
);
82 m_fromNetQueues
[node
].resize(m_virtual_networks
);
84 // Instantiating the Message Buffers that
85 // interact with the coherence protocol
86 for (int j
= 0; j
< m_virtual_networks
; j
++) {
87 m_toNetQueues
[node
][j
] = new MessageBuffer();
88 m_fromNetQueues
[node
][j
] = new MessageBuffer();
94 BaseGarnetNetwork::init()
100 BaseGarnetNetwork::getToNetQueue(NodeID id
, bool ordered
, int network_num
,
103 checkNetworkAllocation(id
, ordered
, network_num
, vnet_type
);
104 return m_toNetQueues
[id
][network_num
];
108 BaseGarnetNetwork::getFromNetQueue(NodeID id
, bool ordered
, int network_num
,
111 checkNetworkAllocation(id
, ordered
, network_num
, vnet_type
);
112 return m_fromNetQueues
[id
][network_num
];
116 BaseGarnetNetwork::regStats()
119 .init(m_virtual_networks
)
120 .name(name() + ".flits_received")
121 .flags(Stats::pdf
| Stats::total
| Stats::nozero
| Stats::oneline
)
125 .init(m_virtual_networks
)
126 .name(name() + ".flits_injected")
127 .flags(Stats::pdf
| Stats::total
| Stats::nozero
| Stats::oneline
)
131 .init(m_virtual_networks
)
132 .name(name() + ".network_latency")
133 .flags(Stats::oneline
)
137 .init(m_virtual_networks
)
138 .name(name() + ".queueing_latency")
139 .flags(Stats::oneline
)
142 for (int i
= 0; i
< m_virtual_networks
; i
++) {
143 m_flits_received
.subname(i
, csprintf("vnet-%i", i
));
144 m_flits_injected
.subname(i
, csprintf("vnet-%i", i
));
145 m_network_latency
.subname(i
, csprintf("vnet-%i", i
));
146 m_queueing_latency
.subname(i
, csprintf("vnet-%i", i
));
150 .name(name() + ".average_vnet_latency")
151 .flags(Stats::oneline
);
152 m_avg_vnet_latency
= m_network_latency
/ m_flits_received
;
155 .name(name() + ".average_vqueue_latency")
156 .flags(Stats::oneline
);
157 m_avg_vqueue_latency
= m_queueing_latency
/ m_flits_received
;
159 m_avg_network_latency
.name(name() + ".average_network_latency");
160 m_avg_network_latency
= sum(m_network_latency
) / sum(m_flits_received
);
162 m_avg_queueing_latency
.name(name() + ".average_queueing_latency");
163 m_avg_queueing_latency
= sum(m_queueing_latency
) / sum(m_flits_received
);
165 m_avg_latency
.name(name() + ".average_latency");
166 m_avg_latency
= m_avg_network_latency
+ m_avg_queueing_latency
;