1 # Copyright (c) 2006-2007 The Regents of The University of Michigan
2 # Copyright (c) 2009,2015 Advanced Micro Devices, Inc.
3 # Copyright (c) 2013 Mark D. Hill and David A. Wood
4 # Copyright (c) 2020 ARM Limited
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met: redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer;
11 # redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution;
14 # neither the name of the copyright holders nor the names of its
15 # contributors may be used to endorse or promote products derived from
16 # this software without specific prior written permission.
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 from m5
.objects
import *
33 from m5
.defines
import buildEnv
34 from Ruby
import create_topology
, create_directories
35 from Ruby
import send_evicts
36 from common
import FileSystemConfig
39 # Declare caches used by the protocol
class L0Cache(RubyCache):
    """Per-CPU private level-0 cache used by the MESI_Three_Level protocol."""
class L1Cache(RubyCache):
    """Per-CPU private level-1 cache used by the MESI_Three_Level protocol."""
class L2Cache(RubyCache):
    """Cluster-shared level-2 cache used by the MESI_Three_Level protocol."""
def define_options(parser):
    """Register MESI_Three_Level-specific command-line options on *parser*.

    Adds knobs for the number of clusters, the geometry of the private L0
    instruction/data caches, and the transitions-per-cycle throughput of
    the L0/L1/L2 controllers.  *parser* must provide an optparse-style
    add_option() method.
    """
    # Fix: the original help text used a backslash continuation *inside*
    # the string literal, which embedded raw indentation whitespace into
    # the user-visible help message.  Implicit concatenation avoids that.
    parser.add_option("--num-clusters", type="int", default=1,
                      help="number of clusters in a design in which "
                           "there are shared caches private to clusters")
    # Private L0 instruction/data cache geometry (per CPU).
    parser.add_option("--l0i_size", type="string", default="4096B")
    parser.add_option("--l0d_size", type="string", default="4096B")
    parser.add_option("--l0i_assoc", type="int", default=1)
    parser.add_option("--l0d_assoc", type="int", default=1)
    # Maximum protocol state-machine transitions per cycle per controller.
    parser.add_option("--l0_transitions_per_cycle", type="int", default=32)
    parser.add_option("--l1_transitions_per_cycle", type="int", default=32)
    parser.add_option("--l2_transitions_per_cycle", type="int", default=4)
# Build the MESI_Three_Level Ruby memory system: per-CPU private L0 (I/D)
# and L1 caches, per-cluster shared L2 banks, directory controllers, and
# DMA/IO controllers, all wired to the ruby network.  Returns
# (cpu_sequencers, mem_dir_cntrl_nodes, topology).
#
# NOTE(review): the tail of this parameter list (and its closing
# parenthesis/colon) is missing from this view of the file -- the uses of
# ruby_system below imply at least a ruby_system parameter; confirm
# against the full source.
def create_system(options, full_system, system, dma_ports, bootmem,

    # NOTE(review): the continuation of this fatal() message string is
    # missing from this view of the file.
    if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
        fatal("This script requires the MESI_Three_Level protocol to be\

    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # NOTE(review): the rest of this comment and the initialisation of the
    # node lists appended to below (cpu_sequencers, l0_cntrl_nodes,
    # l1_cntrl_nodes, l2_cntrl_nodes, dma_cntrl_nodes) are missing from
    # this view of the file.

    # CPUs and L2 banks must divide evenly among the clusters.
    assert (options.num_cpus % options.num_clusters == 0)
    num_cpus_per_cluster = options.num_cpus / options.num_clusters

    assert (options.num_l2caches % options.num_clusters == 0)
    num_l2caches_per_cluster = options.num_l2caches / options.num_clusters

    # Bits selecting the L2 bank; each level's index bits start just above
    # the block-offset bits.
    l2_bits = int(math.log(num_l2caches_per_cluster, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    l2_index_start = block_size_bits + l2_bits

    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    for i in range(options.num_clusters):
        for j in range(num_cpus_per_cluster):
            #
            # First create the Ruby objects associated with this cpu
            #
            # NOTE(review): some keyword-argument lines of these L0Cache
            # calls appear to be missing from this view of the file.
            l0i_cache = L0Cache(size = options.l0i_size,
                                assoc = options.l0i_assoc,
                                start_index_bit = block_size_bits,
                                replacement_policy = LRURP())

            l0d_cache = L0Cache(size = options.l0d_size,
                                assoc = options.l0d_assoc,
                                start_index_bit = block_size_bits,
                                replacement_policy = LRURP())

            # the ruby random tester reuses num_cpus to specify the
            # number of cpu ports connected to the tester object, which
            # is stored in system.cpu. because there is only ever one
            # tester object, num_cpus is not necessarily equal to the
            # size of system.cpu; therefore if len(system.cpu) == 1
            # we use system.cpu[0] to set the clk_domain, thereby ensuring
            # we don't index off the end of the cpu list.
            if len(system.cpu) == 1:
                clk_domain = system.cpu[0].clk_domain
            # NOTE(review): an 'else:' introducing this alternative branch
            # appears to be missing from this view of the file.
                clk_domain = system.cpu[i].clk_domain

            l0_cntrl = L0Cache_Controller(
                version = i * num_cpus_per_cluster + j,
                Icache = l0i_cache, Dcache = l0d_cache,
                transitions_per_cycle = options.l0_transitions_per_cycle,
                send_evictions = send_evicts(options),
                clk_domain = clk_domain,
                ruby_system = ruby_system)

            # NOTE(review): intermediate keyword lines of this call appear
            # to be missing from this view of the file.
            cpu_seq = RubySequencer(version = i * num_cpus_per_cluster + j,
                                    clk_domain = clk_domain,
                                    ruby_system = ruby_system)

            l0_cntrl.sequencer = cpu_seq

            # NOTE(review): the closing lines of this L1Cache call appear
            # to be missing from this view of the file.
            l1_cache = L1Cache(size = options.l1d_size,
                               assoc = options.l1d_assoc,
                               start_index_bit = block_size_bits,

            l1_cntrl = L1Cache_Controller(
                version = i * num_cpus_per_cluster + j,
                cache = l1_cache, l2_select_num_bits = l2_bits,
                transitions_per_cycle = options.l1_transitions_per_cycle,
                ruby_system = ruby_system)

            # Attach the per-CPU controllers to ruby_system under a
            # version-numbered attribute name.
            exec("ruby_system.l0_cntrl%d = l0_cntrl"
                 % ( i * num_cpus_per_cluster + j))
            exec("ruby_system.l1_cntrl%d = l1_cntrl"
                 % ( i * num_cpus_per_cluster + j))

            #
            # Add controllers and sequencers to the appropriate lists
            #
            cpu_sequencers.append(cpu_seq)
            l0_cntrl_nodes.append(l0_cntrl)
            l1_cntrl_nodes.append(l1_cntrl)

            # Connect the L0 and L1 controllers
            l0_cntrl.mandatoryQueue = MessageBuffer()
            l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
            l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1

            # Connect the L1 controllers and the network
            l1_cntrl.requestToL2 = MessageBuffer()
            l1_cntrl.requestToL2.master = ruby_system.network.slave
            l1_cntrl.responseToL2 = MessageBuffer()
            l1_cntrl.responseToL2.master = ruby_system.network.slave
            l1_cntrl.unblockToL2 = MessageBuffer()
            l1_cntrl.unblockToL2.master = ruby_system.network.slave

            l1_cntrl.requestFromL2 = MessageBuffer()
            l1_cntrl.requestFromL2.slave = ruby_system.network.master
            l1_cntrl.responseFromL2 = MessageBuffer()
            l1_cntrl.responseFromL2.slave = ruby_system.network.master

        # One shared L2 bank per cluster iteration.
        for j in range(num_l2caches_per_cluster):
            l2_cache = L2Cache(size = options.l2_size,
                               assoc = options.l2_assoc,
                               start_index_bit = l2_index_start)

            l2_cntrl = L2Cache_Controller(
                version = i * num_l2caches_per_cluster + j,
                L2cache = l2_cache, cluster_id = i,
                transitions_per_cycle =\
                 options.l2_transitions_per_cycle,
                ruby_system = ruby_system)

            exec("ruby_system.l2_cntrl%d = l2_cntrl"
                 % (i * num_l2caches_per_cluster + j))
            l2_cntrl_nodes.append(l2_cntrl)

            # Connect the L2 controllers and the network
            l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
            l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
            l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.responseFromL2Cache = MessageBuffer()
            l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

            l2_cntrl.unblockToL2Cache = MessageBuffer()
            l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
            l2_cntrl.L1RequestToL2Cache = MessageBuffer()
            l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
            l2_cntrl.responseToL2Cache = MessageBuffer()
            l2_cntrl.responseToL2Cache.slave = ruby_system.network.master

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # clk_divider value is a fix to pass regression.
    # NOTE(review): a middle line of this comment appears to be missing
    # from this view of the file.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain = ruby_system.clk_domain, clk_divider = 3)

    # Build the directory controllers (memory directories plus an optional
    # ROM directory) and wire each to the network.
    mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories(
        options, bootmem, ruby_system, system)
    dir_cntrl_nodes = mem_dir_cntrl_nodes[:]
    if rom_dir_cntrl_node is not None:
        dir_cntrl_nodes.append(rom_dir_cntrl_node)
    for dir_cntrl in dir_cntrl_nodes:
        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromMemory = MessageBuffer()

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i, ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

    # NOTE(review): the continuation of this sum (the remaining controller
    # node lists) is missing from this view of the file.
    all_cntrls = l0_cntrl_nodes + \

    # Create the io controller and the sequencer
    # NOTE(review): this IO-controller section is typically guarded by an
    # 'if full_system:' -- such a guard is not visible here; confirm
    # against the full source.
    io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
    ruby_system._io_port = io_seq
    io_controller = DMA_Controller(version = len(dma_ports),
                                   dma_sequencer = io_seq,
                                   ruby_system = ruby_system)
    ruby_system.io_controller = io_controller

    # Connect the dma controller to the network
    io_controller.mandatoryQueue = MessageBuffer()
    io_controller.responseFromDir = MessageBuffer(ordered = True)
    io_controller.responseFromDir.slave = ruby_system.network.master
    io_controller.requestToDir = MessageBuffer()
    io_controller.requestToDir.master = ruby_system.network.slave

    all_cntrls = all_cntrls + [io_controller]

    # Register configuration with filesystem
    for i in xrange(options.num_clusters):
        for j in xrange(num_cpus_per_cluster):
            FileSystemConfig.register_cpu(physical_package_id = 0,
                                          core_siblings =
                                          xrange(options.num_cpus),
                                          core_id = i*num_cpus_per_cluster+j,
                                          thread_siblings = [])

            # NOTE(review): several keyword lines of the next two
            # register_cache() calls (e.g. 'line_size =', 'assoc =',
            # 'idu_type =') appear to be missing from this view of the
            # file, leaving bare 'options.cacheline_size' arguments.
            FileSystemConfig.register_cache(level = 0,
                                            idu_type = 'Instruction',
                                            size = options.l0i_size,
                                            options.cacheline_size,
                                            cpus = [i*num_cpus_per_cluster+j])

            FileSystemConfig.register_cache(level = 0,
                                            size = options.l0d_size,
                                            options.cacheline_size,
                                            cpus = [i*num_cpus_per_cluster+j])

            FileSystemConfig.register_cache(level = 1,
                                            idu_type = 'Unified',
                                            size = options.l1d_size,
                                            line_size =
                                            options.cacheline_size,
                                            assoc = options.l1d_assoc,
                                            cpus = [i*num_cpus_per_cluster+j])

        # The L2 is shared by every CPU in the cluster, so the reported
        # size is the per-bank size times the banks per cluster.
        FileSystemConfig.register_cache(level = 2,
                                        idu_type = 'Unified',
                                        size = str(MemorySize(
                                            options.l2_size) * \
                                            num_l2caches_per_cluster)+'B',
                                        line_size = options.cacheline_size,
                                        assoc = options.l2_assoc,
                                        cpus = [n for n in
                                            xrange(i*num_cpus_per_cluster, \
                                            (i+1)*num_cpus_per_cluster)])

    ruby_system.network.number_of_virtual_networks = 3
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, mem_dir_cntrl_nodes, topology)