configs, mem: Support running VIPER with GCN3
[gem5.git] / configs / ruby / MESI_Two_Level.py
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from .Ruby import create_topology, create_directories
from .Ruby import send_evicts

#
# Declare caches used by the protocol
#
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass

def define_options(parser):
    return
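
# MESI_Two_Level registers no protocol-specific command-line options here;
# create_system() below consumes the generic Ruby/cache options provided by
# the common config scripts (options.num_cpus, options.num_l2caches,
# options.l1i_size, options.l1d_size, options.l2_size,
# options.cacheline_size, options.ports, ...).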

def create_system(options, full_system, system, dma_ports, bootmem,
                  ruby_system):

    if buildEnv['PROTOCOL'] != 'MESI_Two_Level':
        fatal("This script requires the MESI_Two_Level protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
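    # For example, with 64B cache lines and 8 L2 banks (illustrative values,
    # not defaults enforced here), block_size_bits = 6 and l2_bits = 3: the
    # L1 controllers use the three address bits just above the block offset
    # (bits [8:6]) to select an L2 bank via l2_select_num_bits, and each bank
    # starts its set index above those bits (see l2_index_start below).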

    for i in range(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = False)

        prefetcher = RubyPrefetcher()

        # the ruby random tester reuses num_cpus to specify the
        # number of cpu ports connected to the tester object, which
        # is stored in system.cpu. because there is only ever one
        # tester object, num_cpus is not necessarily equal to the
        # size of system.cpu; therefore if len(system.cpu) == 1
        # we use system.cpu[0] to set the clk_domain, thereby ensuring
        # we don't index off the end of the cpu list.
        if len(system.cpu) == 1:
            clk_domain = system.cpu[0].clk_domain
        else:
            clk_domain = system.cpu[i].clk_domain

        l1_cntrl = L1Cache_Controller(version = i, L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      l2_select_num_bits = l2_bits,
                                      send_evictions = send_evicts(options),
                                      prefetcher = prefetcher,
                                      ruby_system = ruby_system,
                                      clk_domain = clk_domain,
                                      transitions_per_cycle = options.ports,
                                      enable_prefetch = False)

        cpu_seq = RubySequencer(version = i, icache = l1i_cache,
                                dcache = l1d_cache, clk_domain = clk_domain,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        setattr(ruby_system, "l1_cntrl%d" % i, l1_cntrl)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controllers and the network
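        # Binding convention used throughout this file: hooking a buffer's
        # master port to network.slave makes it an outbound buffer
        # (controller -> network), while hooking its slave port to
        # network.master makes it inbound (network -> controller), matching
        # the *FromL1Cache / *ToL1Cache names below.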
        l1_cntrl.mandatoryQueue = MessageBuffer()
        l1_cntrl.requestFromL1Cache = MessageBuffer()
        l1_cntrl.requestFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.responseFromL1Cache = MessageBuffer()
        l1_cntrl.responseFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.unblockFromL1Cache = MessageBuffer()
        l1_cntrl.unblockFromL1Cache.master = ruby_system.network.slave

        l1_cntrl.optionalQueue = MessageBuffer()

        l1_cntrl.requestToL1Cache = MessageBuffer()
        l1_cntrl.requestToL1Cache.slave = ruby_system.network.master
        l1_cntrl.responseToL1Cache = MessageBuffer()
        l1_cntrl.responseToL1Cache.slave = ruby_system.network.master

    l2_index_start = block_size_bits + l2_bits
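    # With the illustrative 64B-line / 8-bank numbers above, l2_index_start
    # works out to bit 9.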

    for i in range(options.num_l2caches):
        #
        # First create the Ruby objects associated with this L2 cache
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = l2_index_start)

        l2_cntrl = L2Cache_Controller(version = i,
                                      L2cache = l2_cache,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        setattr(ruby_system, "l2_cntrl%d" % i, l2_cntrl)
        l2_cntrl_nodes.append(l2_cntrl)

        # Connect the L2 controllers and the network
        l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
        l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
        l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
        l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
        l2_cntrl.responseFromL2Cache = MessageBuffer()
        l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

        l2_cntrl.unblockToL2Cache = MessageBuffer()
        l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
        l2_cntrl.L1RequestToL2Cache = MessageBuffer()
        l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
        l2_cntrl.responseToL2Cache = MessageBuffer()
        l2_cntrl.responseToL2Cache.slave = ruby_system.network.master

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain = ruby_system.clk_domain,
        clk_divider = 3)
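    # With clk_divider = 3 this derived domain ticks at one third of the Ruby
    # clock, e.g. a 2 GHz ruby clock yields a ~667 MHz memctrl clock
    # (illustrative numbers; the actual ruby clock comes from the command
    # line).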

    mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories(
        options, bootmem, ruby_system, system)
    dir_cntrl_nodes = mem_dir_cntrl_nodes[:]
    if rom_dir_cntrl_node is not None:
        dir_cntrl_nodes.append(rom_dir_cntrl_node)
    for dir_cntrl in dir_cntrl_nodes:
        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.requestToMemory = MessageBuffer()
        dir_cntrl.responseFromMemory = MessageBuffer()
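        # requestToMemory / responseFromMemory are deliberately not bound to
        # the network; they buffer the directory's traffic to and from its
        # memory port, whose connection to the actual memory controller is
        # made elsewhere in the Ruby setup.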

    for i, dma_port in enumerate(dma_ports):
        # Create the Ruby objects associated with the dma controller
        dma_seq = DMASequencer(version = i, ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i, dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        setattr(ruby_system, "dma_cntrl%d" % i, dma_cntrl)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version = len(dma_ports),
                              ruby_system = ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the io controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    ruby_system.network.number_of_virtual_networks = 3
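    # MESI_Two_Level uses three virtual networks; the mapping of the message
    # buffers above onto those vnets is defined in the protocol's SLICC (.sm)
    # files, not in this config script.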
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, mem_dir_cntrl_nodes, topology)
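
# Rough call flow, as a sketch only (the dispatching code lives in
# configs/ruby/Ruby.py, not in this file):
#
#   # Ruby.py builds the RubySystem, then hands it to the protocol script:
#   ruby_system = RubySystem()
#   (cpu_sequencers, dir_cntrls, topology) = \
#       create_system(options, full_system, system, dma_ports, bootmem,
#                     ruby_system)
#
# The returned cpu_sequencers are subsequently connected to the CPU and I/O
# ports, and the topology object is used to construct the Ruby network.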