# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math

from m5.defines import buildEnv
from m5.objects import *

from Ruby import create_topology
from Ruby import send_evicts
#
# Declare caches used by the protocol
#
# Per-core L0 cache parameters (instantiated below as split 4KB I/D caches).
class L0Cache(RubyCache): pass
# Private per-core L1 cache parameters (sized from options.l1d_size below).
class L1Cache(RubyCache): pass
# Cluster-shared L2 cache parameters (sized from options.l2_size below).
class L2Cache(RubyCache): pass
def define_options(parser):
    """Register MESI_Three_Level-specific command line options on *parser*.

    Adds --num-clusters (int, default 1): the number of clusters, each of
    which gets its own set of shared L2 caches private to the cluster.
    """
    # The original help string used a backslash line-continuation *inside*
    # the literal, which embedded the raw source indentation into the
    # user-visible help text; use implicit string concatenation instead.
    parser.add_option("--num-clusters", type="int", default=1,
                      help="number of clusters in a design in which there "
                           "are shared caches private to clusters")
def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the Ruby objects for the MESI_Three_Level protocol.

    Creates, per cluster, one L0/L1 controller pair per cpu and a set of
    shared L2 controllers, then one directory controller per memory module
    and one DMA controller per dma port, wiring each controller's message
    buffers to the ruby network.

    Returns a (cpu_sequencers, dir_cntrl_nodes, topology) tuple.

    Fixes vs. the previous revision: the controller/sequencer node lists
    were never initialized (NameError on first append), the
    DerivedClockDomain call was unterminated, the all_cntrls sum was
    incomplete, and the io controller was created even when full_system
    was False.
    """
    if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
        fatal("This script requires the MESI_Three_Level protocol to be built.")

    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    cpu_sequencers = []
    l0_cntrl_nodes = []
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    assert (options.num_cpus % options.num_clusters == 0)
    num_cpus_per_cluster = options.num_cpus / options.num_clusters

    assert (options.num_l2caches % options.num_clusters == 0)
    num_l2caches_per_cluster = options.num_l2caches / options.num_clusters

    # Index layout: block offset bits, then the l2 bank-select bits, then
    # the l2 set-index bits.
    l2_bits = int(math.log(num_l2caches_per_cluster, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    l2_index_start = block_size_bits + l2_bits

    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    for i in xrange(options.num_clusters):
        for j in xrange(num_cpus_per_cluster):
            #
            # First create the Ruby objects associated with this cpu
            #
            l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True,
                                start_index_bit = block_size_bits,
                                replacement_policy = LRUReplacementPolicy())

            l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False,
                                start_index_bit = block_size_bits,
                                replacement_policy = LRUReplacementPolicy())

            l0_cntrl = L0Cache_Controller(
                version = i * num_cpus_per_cluster + j,
                Icache = l0i_cache, Dcache = l0d_cache,
                send_evictions = send_evicts(options),
                clk_domain = system.cpu[i].clk_domain,
                ruby_system = ruby_system)

            cpu_seq = RubySequencer(version = i * num_cpus_per_cluster + j,
                                    clk_domain = system.cpu[i].clk_domain,
                                    dcache = l0d_cache,
                                    ruby_system = ruby_system)

            l0_cntrl.sequencer = cpu_seq

            l1_cache = L1Cache(size = options.l1d_size,
                               assoc = options.l1d_assoc,
                               start_index_bit = block_size_bits,
                               is_icache = False)

            l1_cntrl = L1Cache_Controller(
                version = i * num_cpus_per_cluster + j,
                cache = l1_cache, l2_select_num_bits = l2_bits,
                cluster_id = i, ruby_system = ruby_system)

            exec("ruby_system.l0_cntrl%d = l0_cntrl" % (
                i * num_cpus_per_cluster + j))
            exec("ruby_system.l1_cntrl%d = l1_cntrl" % (
                i * num_cpus_per_cluster + j))

            # Add controllers and sequencers to the appropriate lists
            cpu_sequencers.append(cpu_seq)
            l0_cntrl_nodes.append(l0_cntrl)
            l1_cntrl_nodes.append(l1_cntrl)

            # Connect the L0 and L1 controllers (point-to-point ordered
            # buffers, not routed through the network)
            l0_cntrl.mandatoryQueue = MessageBuffer()
            l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
            l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1

            # Connect the L1 controllers and the network
            l1_cntrl.requestToL2 = MessageBuffer()
            l1_cntrl.requestToL2.master = ruby_system.network.slave
            l1_cntrl.responseToL2 = MessageBuffer()
            l1_cntrl.responseToL2.master = ruby_system.network.slave
            l1_cntrl.unblockToL2 = MessageBuffer()
            l1_cntrl.unblockToL2.master = ruby_system.network.slave

            l1_cntrl.requestFromL2 = MessageBuffer()
            l1_cntrl.requestFromL2.slave = ruby_system.network.master
            l1_cntrl.responseFromL2 = MessageBuffer()
            l1_cntrl.responseFromL2.slave = ruby_system.network.master

        for j in xrange(num_l2caches_per_cluster):
            l2_cache = L2Cache(size = options.l2_size,
                               assoc = options.l2_assoc,
                               start_index_bit = l2_index_start)

            l2_cntrl = L2Cache_Controller(
                version = i * num_l2caches_per_cluster + j,
                L2cache = l2_cache, cluster_id = i,
                transitions_per_cycle = options.ports,
                ruby_system = ruby_system)

            exec("ruby_system.l2_cntrl%d = l2_cntrl" % (
                i * num_l2caches_per_cluster + j))
            l2_cntrl_nodes.append(l2_cntrl)

            # Connect the L2 controllers and the network
            l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
            l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
            l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.responseFromL2Cache = MessageBuffer()
            l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

            l2_cntrl.unblockToL2Cache = MessageBuffer()
            l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
            l2_cntrl.L1RequestToL2Cache = MessageBuffer()
            l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
            l2_cntrl.responseToL2Cache = MessageBuffer()
            l2_cntrl.responseToL2Cache.slave = ruby_system.network.master

    # Each directory controller is responsible for an equal slice of the
    # physical memory.
    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain = ruby_system.clk_domain,
        clk_divider = 3)

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromMemory = MessageBuffer()

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

    # Order matters: l1 nodes before directory nodes before dma nodes (see
    # the NetDest comment above).
    all_cntrls = l0_cntrl_nodes + \
                 l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer (full-system only)
    if full_system:
        io_seq = DMASequencer(version = len(dma_ports),
                              ruby_system = ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)