# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math

from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
# Declare caches used by the protocol
class L1Cache(RubyCache):
    # Split I/D L1 cache; size, assoc, and start_index_bit are filled in
    # per-CPU by create_system() below.
    pass
class L2Cache(RubyCache):
    # Unified L2 cache; size, assoc, and start_index_bit are filled in
    # per-CPU by create_system() below.
    pass
# Probe filter is a cache
class ProbeFilter(RubyCache):
    # Directory-side probe filter; sized relative to the L2 (2x) in
    # create_system() below.
    pass
def define_options(parser):
    """Register the MOESI_hammer-specific command line flags on parser.

    All three options are plain store_true booleans; registration order is
    preserved so --help output is unchanged.
    """
    boolean_flags = (
        ("--allow-atomic-migration",
         "allow migratory sharing for atomic only accessed blocks"),
        ("--pf-on", "Hammer: enable Probe Filter"),
        ("--dir-on", "Hammer: enable Full-bit Directory"),
    )
    for flag, help_text in boolean_flags:
        parser.add_option(flag, action="store_true", help=help_text)
def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the MOESI_hammer Ruby memory system.

    Creates one L1 controller + sequencer per CPU, one directory controller
    per memory module, and one DMA controller per DMA port (plus a pio DMA
    controller for full-system runs), wires each controller's MessageBuffers
    to the ruby network, and builds the interconnect topology.

    Returns:
        (cpu_sequencers, dir_cntrl_nodes, topology) tuple.
    """
    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must
    # be listed before the directory nodes and directory nodes before dma
    # nodes, etc.  (These lists were missing their initializations in the
    # damaged source; they are appended to below.)
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        # First create the Ruby objects associated with this cpu
        # NOTE(review): is_icache=True reconstructed from upstream gem5 --
        # confirm against the repository copy of this script.
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        # NOTE(review): the L1Icache/L1Dcache/L2cache kwargs and the
        # "no_mig_atomic = not" prefix were lost in the damaged source
        # (the caches above were otherwise never used, and a bare
        # positional arg after keywords is a syntax error); reconstructed
        # from upstream gem5 -- confirm.
        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      L2cache = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = send_evicts(options),
                                      transitions_per_cycle = options.ports,
                                      clk_domain=system.cpu[i].clk_domain,
                                      ruby_system = ruby_system)

        # NOTE(review): icache/dcache kwargs reconstructed from upstream
        # gem5 -- confirm.
        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                clk_domain=system.cpu[i].clk_domain,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controller and the network
        # Connect the buffers from the controller to network
        l1_cntrl.requestFromCache = MessageBuffer()
        l1_cntrl.requestFromCache.master = ruby_system.network.slave
        l1_cntrl.responseFromCache = MessageBuffer()
        l1_cntrl.responseFromCache.master = ruby_system.network.slave
        l1_cntrl.unblockFromCache = MessageBuffer()
        l1_cntrl.unblockFromCache.master = ruby_system.network.slave

        # Controller-internal queue; not connected to the network.
        l1_cntrl.triggerQueue = MessageBuffer()

        # Connect the buffers from the network to the controller
        l1_cntrl.mandatoryQueue = MessageBuffer()
        l1_cntrl.forwardToCache = MessageBuffer()
        l1_cntrl.forwardToCache.slave = ruby_system.network.master
        l1_cntrl.responseToCache = MessageBuffer()
        l1_cntrl.responseToCache.slave = ruby_system.network.master

    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.pf_on or options.dir_on:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = block_size_bits
    else:
        # NOTE(review): the else/dir_bits branch structure was lost in the
        # damaged source; reconstructed from upstream gem5 -- confirm.
        if dir_bits > 0:
            pf_start_bit = dir_bits + block_size_bits - 1
        else:
            pf_start_bit = block_size_bits

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system.
    # clk_divider value is a fix to pass regression.
    # NOTE(review): clk_divider=3 reconstructed from upstream gem5 (the call
    # was left unclosed in the damaged source) -- confirm.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    for i in xrange(options.num_dirs):
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        # NOTE(review): the probeFilter=pf kwarg was lost in the damaged
        # source (pf was otherwise never used); reconstructed from upstream
        # gem5 -- confirm.
        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         probeFilter = pf,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controller to the network
        dir_cntrl.forwardFromDir = MessageBuffer()
        dir_cntrl.forwardFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True)
        dir_cntrl.dmaResponseFromDir.master = ruby_system.network.slave

        # Controller-internal queue; not connected to the network.
        dir_cntrl.triggerQueue = MessageBuffer(ordered = True)

        dir_cntrl.unblockToDir = MessageBuffer()
        dir_cntrl.unblockToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True)
        dir_cntrl.dmaRequestToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromMemory = MessageBuffer()

    for i, dma_port in enumerate(dma_ports):
        # Create the Ruby objects associated with the dma controller
        # NOTE(review): slave=dma_port reconstructed from upstream gem5
        # (dma_port was otherwise never used and the call ended with a
        # dangling comma) -- confirm.
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave
        dma_cntrl.mandatoryQueue = MessageBuffer()

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    # Create the io controller and the sequencer
    # NOTE(review): the full_system guard was lost in the damaged source
    # (the full_system parameter was otherwise unused); reconstructed from
    # upstream gem5 -- confirm.
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave
        io_controller.mandatoryQueue = MessageBuffer()

        all_cntrls = all_cntrls + [io_controller]

    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)