# Copyright (c) 2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder.  You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann

import math

from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology, create_directories
from Ruby import send_evicts
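
# This file is not run directly: configs/ruby/Ruby.py imports the protocol
# module named by buildEnv['PROTOCOL'] and invokes its define_options() and
# create_system() hooks while building the Ruby memory system.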

#
# Declare caches used by the protocol
#
class L1Cache(RubyCache):
    dataAccessLatency = 1
    tagAccessLatency = 1

class L2Cache(RubyCache):
    dataAccessLatency = 20
    tagAccessLatency = 20

def define_options(parser):
    return

def create_system(options, full_system, system, dma_ports, bootmem,
                  ruby_system):

    if buildEnv['PROTOCOL'] != 'MOESI_CMP_directory':
        panic("This script requires the MOESI_CMP_directory protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list.  Therefore the l1 controller
    # nodes must be listed before the directory nodes and directory nodes
    # before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in range(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = False)
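        # start_index_bit skips the block-offset bits, so cache set indexing
        # starts just above the line offset within the address.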

        # the ruby random tester reuses num_cpus to specify the
        # number of cpu ports connected to the tester object, which
        # is stored in system.cpu. because there is only ever one
        # tester object, num_cpus is not necessarily equal to the
        # size of system.cpu; therefore if len(system.cpu) == 1
        # we use system.cpu[0] to set the clk_domain, thereby ensuring
        # we don't index off the end of the cpu list.
        if len(system.cpu) == 1:
            clk_domain = system.cpu[0].clk_domain
        else:
            clk_domain = system.cpu[i].clk_domain

        l1_cntrl = L1Cache_Controller(version=i, L1Icache=l1i_cache,
                                      L1Dcache=l1d_cache,
                                      send_evictions=send_evicts(options),
                                      transitions_per_cycle=options.ports,
                                      clk_domain=clk_domain,
                                      ruby_system=ruby_system)
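        # L1Cache_Controller (like L2Cache_Controller and DMA_Controller
        # below) is generated by SLICC from the MOESI_CMP_directory state
        # machine sources at build time.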

        cpu_seq = RubySequencer(version=i, icache=l1i_cache,
                                dcache=l1d_cache, clk_domain=clk_domain,
                                ruby_system=ruby_system)

        l1_cntrl.sequencer = cpu_seq
        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
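        # Attaching the controller to ruby_system (as l1_cntrl0, l1_cntrl1,
        # ...) makes it a child SimObject, so it is instantiated along with
        # the rest of the system.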

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controllers and the network
        l1_cntrl.mandatoryQueue = MessageBuffer()
        l1_cntrl.requestFromL1Cache = MessageBuffer()
        l1_cntrl.requestFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.responseFromL1Cache = MessageBuffer()
        l1_cntrl.responseFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.requestToL1Cache = MessageBuffer()
        l1_cntrl.requestToL1Cache.slave = ruby_system.network.master
        l1_cntrl.responseToL1Cache = MessageBuffer()
        l1_cntrl.responseToL1Cache.slave = ruby_system.network.master
        l1_cntrl.triggerQueue = MessageBuffer(ordered = True)
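        # Wiring convention: "From" buffers inject into the network (buffer
        # master -> network slave) and "To" buffers drain from it (network
        # master -> buffer slave); the mandatory and trigger queues are
        # internal to the controller and are left unconnected.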

    # Create the L2s interleaved addr ranges
    l2_addr_ranges = []
    l2_bits = int(math.log(options.num_l2caches, 2))
    numa_bit = block_size_bits + l2_bits - 1
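    # For example, with 64 B cache lines (block_size_bits = 6) and four L2
    # banks (l2_bits = 2), numa_bit = 7, so address bits [7:6] pick the L2
    # bank a line maps to.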
    sysranges = [] + system.mem_ranges
    if bootmem:
        sysranges.append(bootmem.range)
    for i in range(options.num_l2caches):
        ranges = []
        for r in sysranges:
            addr_range = AddrRange(r.start, size = r.size(),
                                   intlvHighBit = numa_bit,
                                   intlvBits = l2_bits,
                                   intlvMatch = i)
            ranges.append(addr_range)
        l2_addr_ranges.append(ranges)
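    # Each L2 bank i thus claims the slice of every system range whose
    # interleave bits match i.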

    for i in range(options.num_l2caches):
        #
        # First create the Ruby objects associated with this L2 cache
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits + l2_bits)

        l2_cntrl = L2Cache_Controller(version = i,
                                      L2cache = l2_cache,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system,
                                      addr_ranges = l2_addr_ranges[i])

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)

        # Connect the L2 controllers and the network
        l2_cntrl.GlobalRequestFromL2Cache = MessageBuffer()
        l2_cntrl.GlobalRequestFromL2Cache.master = ruby_system.network.slave
        l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
        l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
        l2_cntrl.responseFromL2Cache = MessageBuffer()
        l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

        l2_cntrl.GlobalRequestToL2Cache = MessageBuffer()
        l2_cntrl.GlobalRequestToL2Cache.slave = ruby_system.network.master
        l2_cntrl.L1RequestToL2Cache = MessageBuffer()
        l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
        l2_cntrl.responseToL2Cache = MessageBuffer()
        l2_cntrl.responseToL2Cache.slave = ruby_system.network.master
        l2_cntrl.triggerQueue = MessageBuffer(ordered = True)

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system.
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
                                          clk_domain=ruby_system.clk_domain,
                                          clk_divider=3)

    mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories(
        options, bootmem, ruby_system, system)
    dir_cntrl_nodes = mem_dir_cntrl_nodes[:]
    if rom_dir_cntrl_node is not None:
        dir_cntrl_nodes.append(rom_dir_cntrl_node)
    for dir_cntrl in dir_cntrl_nodes:
        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.forwardFromDir = MessageBuffer()
        dir_cntrl.forwardFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromMemory = MessageBuffer()
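        # responseFromMemory is intentionally not wired to the network; it
        # buffers responses from the directory's attached memory controller.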

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer()
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.reqToDir = MessageBuffer()
        dma_cntrl.reqToDir.master = ruby_system.network.slave
        dma_cntrl.respToDir = MessageBuffer()
        dma_cntrl.respToDir.master = ruby_system.network.slave
        dma_cntrl.triggerQueue = MessageBuffer(ordered = True)

    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes
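    # The concatenation order follows the NetDest requirement noted above:
    # L1s, then L2s, then directories, then DMA controllers.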

    if full_system:
        # Create the io controller and the sequencer
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the io controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer()
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.reqToDir = MessageBuffer()
        io_controller.reqToDir.master = ruby_system.network.slave
        io_controller.respToDir = MessageBuffer()
        io_controller.respToDir.master = ruby_system.network.slave
        io_controller.triggerQueue = MessageBuffer(ordered = True)

        all_cntrls = all_cntrls + [io_controller]

    ruby_system.network.number_of_virtual_networks = 3
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, mem_dir_cntrl_nodes, topology)