ruby: message buffers: significant changes
authorNilay Vaish <nilay@cs.wisc.edu>
Mon, 1 Sep 2014 21:55:47 +0000 (16:55 -0500)
committerNilay Vaish <nilay@cs.wisc.edu>
Mon, 1 Sep 2014 21:55:47 +0000 (16:55 -0500)
This patch is the final patch in a series of patches.  The aim of the series
is to make ruby more configurable than it was.  More specifically, the
changing the connections between controllers is not at all possible (unless one is ready
to make significant changes to the coherence protocol).  Moreover the buffers
themselves are magically connected to the network inside the slicc code.
These connections are not part of the configuration file.

This patch makes changes so that these connections will now be made in the
python configuration files associated with the protocols.  This requires
each state machine to expose the message buffers it uses for input and output.
So, the patch makes these buffers configurable members of the machines.

The patch drops the slicc code that used to connect these buffers to the
network.  Now these buffers are exposed to the python configuration system
as Master and Slave ports.  In the configuration files, any master port
can be connected to any slave port.  The file pyobject.cc has been modified to
take care of allocating the actual message buffer.  This is in line with how
other port connections work.

54 files changed:
configs/ruby/MESI_Three_Level.py
configs/ruby/MESI_Two_Level.py
configs/ruby/MI_example.py
configs/ruby/MOESI_CMP_directory.py
configs/ruby/MOESI_CMP_token.py
configs/ruby/MOESI_hammer.py
configs/ruby/Network_test.py
configs/ruby/Ruby.py
src/mem/protocol/MESI_Three_Level-L0cache.sm
src/mem/protocol/MESI_Three_Level-L1cache.sm
src/mem/protocol/MESI_Two_Level-L1cache.sm
src/mem/protocol/MESI_Two_Level-L2cache.sm
src/mem/protocol/MESI_Two_Level-dir.sm
src/mem/protocol/MESI_Two_Level-dma.sm
src/mem/protocol/MI_example-cache.sm
src/mem/protocol/MI_example-dir.sm
src/mem/protocol/MI_example-dma.sm
src/mem/protocol/MOESI_CMP_directory-L1cache.sm
src/mem/protocol/MOESI_CMP_directory-L2cache.sm
src/mem/protocol/MOESI_CMP_directory-dir.sm
src/mem/protocol/MOESI_CMP_directory-dma.sm
src/mem/protocol/MOESI_CMP_token-L1cache.sm
src/mem/protocol/MOESI_CMP_token-L2cache.sm
src/mem/protocol/MOESI_CMP_token-dir.sm
src/mem/protocol/MOESI_CMP_token-dma.sm
src/mem/protocol/MOESI_hammer-cache.sm
src/mem/protocol/MOESI_hammer-dir.sm
src/mem/protocol/MOESI_hammer-dma.sm
src/mem/protocol/Network_test-cache.sm
src/mem/protocol/Network_test-dir.sm
src/mem/ruby/network/Network.cc
src/mem/ruby/network/Network.hh
src/mem/ruby/network/Network.py
src/mem/ruby/network/garnet/BaseGarnetNetwork.cc
src/mem/ruby/network/garnet/BaseGarnetNetwork.hh
src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.hh
src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.hh
src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.cc
src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.hh
src/mem/ruby/network/simple/PerfectSwitch.cc
src/mem/ruby/network/simple/PerfectSwitch.hh
src/mem/ruby/network/simple/SimpleNetwork.cc
src/mem/ruby/network/simple/SimpleNetwork.hh
src/mem/ruby/network/simple/Switch.cc
src/mem/ruby/network/simple/Switch.hh
src/mem/ruby/network/simple/Throttle.cc
src/mem/ruby/network/simple/Throttle.hh
src/mem/ruby/slicc_interface/AbstractController.cc
src/mem/ruby/slicc_interface/AbstractController.hh
src/mem/slicc/symbols/StateMachine.py
src/python/swig/pyobject.cc

index ee6ceccf9f0fb276b6910533756a77ecb94b07d5..1ddffc34a11a7a4201b5db76d3ba305fba9d1ee3 100644 (file)
@@ -129,7 +129,19 @@ def create_system(options, system, dma_ports, ruby_system):
             cpu_sequencers.append(cpu_seq)
             l0_cntrl_nodes.append(l0_cntrl)
             l1_cntrl_nodes.append(l1_cntrl)
-            l0_cntrl.peer = l1_cntrl
+
+            # Connect the L0 and L1 controllers
+            l0_cntrl.bufferToL1 = l1_cntrl.bufferFromL0
+            l0_cntrl.bufferFromL1 = l1_cntrl.bufferToL0
+
+            # Connect the L1 controllers and the network
+            l1_cntrl.requestToL2 =  ruby_system.network.slave
+            l1_cntrl.responseToL2 =  ruby_system.network.slave
+            l1_cntrl.unblockToL2 =  ruby_system.network.slave
+
+            l1_cntrl.requestFromL2 =  ruby_system.network.master
+            l1_cntrl.responseFromL2 =  ruby_system.network.master
+
 
         for j in xrange(num_l2caches_per_cluster):
             l2_cache = L2Cache(size = options.l2_size,
@@ -146,6 +158,15 @@ def create_system(options, system, dma_ports, ruby_system):
                         i * num_l2caches_per_cluster + j))
             l2_cntrl_nodes.append(l2_cntrl)
 
+            # Connect the L2 controllers and the network
+            l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
+            l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+            l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+            l2_cntrl.unblockToL2Cache = ruby_system.network.master
+            l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+            l2_cntrl.responseToL2Cache = ruby_system.network.master
+
     phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
     assert(phys_mem_size % options.num_dirs == 0)
     mem_module_size = phys_mem_size / options.num_dirs
@@ -183,6 +204,11 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
         dir_cntrl_nodes.append(dir_cntrl)
 
+        # Connect the directory controllers and the network
+        dir_cntrl.requestToDir = ruby_system.network.master
+        dir_cntrl.responseToDir = ruby_system.network.master
+        dir_cntrl.responseFromDir = ruby_system.network.slave
+
     for i, dma_port in enumerate(dma_ports):
         #
         # Create the Ruby objects associated with the dma controller
index c70c599c7a45df205f4124d3da23965c6f401ccc..8d75fe22e541e423c17f3a5a0c243a3e4055f04b 100644 (file)
@@ -108,12 +108,19 @@ def create_system(options, system, dma_ports, ruby_system):
         l1_cntrl.sequencer = cpu_seq
         exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
 
-        #
         # Add controllers and sequencers to the appropriate lists
-        #
         cpu_sequencers.append(cpu_seq)
         l1_cntrl_nodes.append(l1_cntrl)
 
+        # Connect the L1 controllers and the network
+        l1_cntrl.requestFromL1Cache =  ruby_system.network.slave
+        l1_cntrl.responseFromL1Cache =  ruby_system.network.slave
+        l1_cntrl.unblockFromL1Cache =  ruby_system.network.slave
+
+        l1_cntrl.requestToL1Cache =  ruby_system.network.master
+        l1_cntrl.responseToL1Cache =  ruby_system.network.master
+
+
     l2_index_start = block_size_bits + l2_bits
 
     for i in xrange(options.num_l2caches):
@@ -132,10 +139,21 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
         l2_cntrl_nodes.append(l2_cntrl)
 
+        # Connect the L2 controllers and the network
+        l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
+        l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+        l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+        l2_cntrl.unblockToL2Cache = ruby_system.network.master
+        l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+        l2_cntrl.responseToL2Cache = ruby_system.network.master
+
+
     phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
     assert(phys_mem_size % options.num_dirs == 0)
     mem_module_size = phys_mem_size / options.num_dirs
 
+
     # Run each of the ruby memory controllers at a ratio of the frequency of
     # the ruby system
     # clk_divider value is a fix to pass regression.
@@ -169,10 +187,14 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
         dir_cntrl_nodes.append(dir_cntrl)
 
+        # Connect the directory controllers and the network
+        dir_cntrl.requestToDir = ruby_system.network.master
+        dir_cntrl.responseToDir = ruby_system.network.master
+        dir_cntrl.responseFromDir = ruby_system.network.slave
+
+
     for i, dma_port in enumerate(dma_ports):
-        #
         # Create the Ruby objects associated with the dma controller
-        #
         dma_seq = DMASequencer(version = i,
                                ruby_system = ruby_system)
 
@@ -185,6 +207,11 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
         dma_cntrl_nodes.append(dma_cntrl)
 
+        # Connect the dma controller to the network
+        dma_cntrl.responseFromDir = ruby_system.network.master
+        dma_cntrl.requestToDir = ruby_system.network.slave
+
+
     all_cntrls = l1_cntrl_nodes + \
                  l2_cntrl_nodes + \
                  dir_cntrl_nodes + \
index 0124792506f787b6f0990afa1af3ec1ae9d07471..f671adbaaa9601c540fb93d19c90205ca99b8ed6 100644 (file)
@@ -94,12 +94,17 @@ def create_system(options, system, dma_ports, ruby_system):
         l1_cntrl.sequencer = cpu_seq
         exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
 
-        #
         # Add controllers and sequencers to the appropriate lists
-        #
         cpu_sequencers.append(cpu_seq)
         l1_cntrl_nodes.append(l1_cntrl)
 
+        # Connect the L1 controllers and the network
+        l1_cntrl.requestFromCache =  ruby_system.network.slave
+        l1_cntrl.responseFromCache =  ruby_system.network.slave
+        l1_cntrl.forwardToCache =  ruby_system.network.master
+        l1_cntrl.responseToCache =  ruby_system.network.master
+
+
     phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
     assert(phys_mem_size % options.num_dirs == 0)
     mem_module_size = phys_mem_size / options.num_dirs
@@ -139,6 +144,15 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
         dir_cntrl_nodes.append(dir_cntrl)
 
+        # Connect the directory controllers and the network
+        dir_cntrl.requestToDir = ruby_system.network.master
+        dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+        dir_cntrl.responseFromDir = ruby_system.network.slave
+        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+        dir_cntrl.forwardFromDir = ruby_system.network.slave
+
+
     for i, dma_port in enumerate(dma_ports):
         #
         # Create the Ruby objects associated with the dma controller
@@ -155,8 +169,11 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
         dma_cntrl_nodes.append(dma_cntrl)
 
-    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
+        # Connect the directory controllers and the network
+        dma_cntrl.requestToDir = ruby_system.network.master
+        dma_cntrl.responseFromDir = ruby_system.network.slave
 
-    topology = create_topology(all_cntrls, options)
 
+    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
+    topology = create_topology(all_cntrls, options)
     return (cpu_sequencers, dir_cntrl_nodes, topology)
index aa474209ff859a6b3708e56144319f303cb1afc5..d390efa0dda2ff4f304b0b310f24101a71cdb030 100644 (file)
@@ -104,12 +104,17 @@ def create_system(options, system, dma_ports, ruby_system):
         l1_cntrl.sequencer = cpu_seq
         exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
 
-        #
         # Add controllers and sequencers to the appropriate lists
-        #
         cpu_sequencers.append(cpu_seq)
         l1_cntrl_nodes.append(l1_cntrl)
 
+        # Connect the L1 controllers and the network
+        l1_cntrl.requestFromL1Cache =  ruby_system.network.slave
+        l1_cntrl.responseFromL1Cache =  ruby_system.network.slave
+        l1_cntrl.requestToL1Cache =  ruby_system.network.master
+        l1_cntrl.responseToL1Cache =  ruby_system.network.master
+
+
     l2_index_start = block_size_bits + l2_bits
 
     for i in xrange(options.num_l2caches):
@@ -128,10 +133,21 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
         l2_cntrl_nodes.append(l2_cntrl)
 
+        # Connect the L2 controllers and the network
+        l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
+        l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+        l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+        l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
+        l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+        l2_cntrl.responseToL2Cache = ruby_system.network.master
+
+
     phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
     assert(phys_mem_size % options.num_dirs == 0)
     mem_module_size = phys_mem_size / options.num_dirs
 
+
     # Run each of the ruby memory controllers at a ratio of the frequency of
     # the ruby system.
     # clk_divider value is a fix to pass regression.
@@ -164,6 +180,13 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
         dir_cntrl_nodes.append(dir_cntrl)
 
+        # Connect the directory controllers and the network
+        dir_cntrl.requestToDir = ruby_system.network.master
+        dir_cntrl.responseToDir = ruby_system.network.master
+        dir_cntrl.responseFromDir = ruby_system.network.slave
+        dir_cntrl.forwardFromDir = ruby_system.network.slave
+
+
     for i, dma_port in enumerate(dma_ports):
         #
         # Create the Ruby objects associated with the dma controller
@@ -180,11 +203,11 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
         dma_cntrl_nodes.append(dma_cntrl)
 
+
     all_cntrls = l1_cntrl_nodes + \
                  l2_cntrl_nodes + \
                  dir_cntrl_nodes + \
                  dma_cntrl_nodes
 
     topology = create_topology(all_cntrls, options)
-
     return (cpu_sequencers, dir_cntrl_nodes, topology)
index 36a5325749a6b16b3ca956348b393613c559058c..ef793530b99403e8980e6b852edef6a0d2efad49 100644 (file)
@@ -124,12 +124,20 @@ def create_system(options, system, dma_ports, ruby_system):
         l1_cntrl.sequencer = cpu_seq
         exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
 
-        #
         # Add controllers and sequencers to the appropriate lists
-        #
         cpu_sequencers.append(cpu_seq)
         l1_cntrl_nodes.append(l1_cntrl)
 
+        # Connect the L1 controllers and the network
+        l1_cntrl.requestFromL1Cache =  ruby_system.network.slave
+        l1_cntrl.responseFromL1Cache =  ruby_system.network.slave
+        l1_cntrl.persistentFromL1Cache =  ruby_system.network.slave
+
+        l1_cntrl.requestToL1Cache =  ruby_system.network.master
+        l1_cntrl.responseToL1Cache =  ruby_system.network.master
+        l1_cntrl.persistentToL1Cache =  ruby_system.network.master
+
+
     l2_index_start = block_size_bits + l2_bits
 
     for i in xrange(options.num_l2caches):
@@ -149,6 +157,17 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
         l2_cntrl_nodes.append(l2_cntrl)
 
+        # Connect the L2 controllers and the network
+        l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
+        l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+        l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+        l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
+        l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+        l2_cntrl.responseToL2Cache = ruby_system.network.master
+        l2_cntrl.persistentToL2Cache = ruby_system.network.master
+
+
     phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
     assert(phys_mem_size % options.num_dirs == 0)
     mem_module_size = phys_mem_size / options.num_dirs
@@ -186,6 +205,18 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
         dir_cntrl_nodes.append(dir_cntrl)
 
+        # Connect the directory controllers and the network
+        dir_cntrl.requestToDir = ruby_system.network.master
+        dir_cntrl.responseToDir = ruby_system.network.master
+        dir_cntrl.persistentToDir = ruby_system.network.master
+        dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+        dir_cntrl.requestFromDir = ruby_system.network.slave
+        dir_cntrl.responseFromDir = ruby_system.network.slave
+        dir_cntrl.persistentFromDir = ruby_system.network.slave
+        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+
+
     for i, dma_port in enumerate(dma_ports):
         #
         # Create the Ruby objects associated with the dma controller
index de98fd0c2a4dc39f18d999915eff6ee598de5467..c13a6cc3a2e50b6f31968718aa9c773f5f2ce806 100644 (file)
@@ -119,12 +119,22 @@ def create_system(options, system, dma_ports, ruby_system):
             l1_cntrl.recycle_latency = options.recycle_latency
 
         exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
-        #
+
         # Add controllers and sequencers to the appropriate lists
-        #
         cpu_sequencers.append(cpu_seq)
         l1_cntrl_nodes.append(l1_cntrl)
 
+        # Connect the L1 controller and the network
+        # Connect the buffers from the controller to network
+        l1_cntrl.requestFromCache = ruby_system.network.slave
+        l1_cntrl.responseFromCache = ruby_system.network.slave
+        l1_cntrl.unblockFromCache = ruby_system.network.slave
+
+        # Connect the buffers from the network to the controller
+        l1_cntrl.forwardToCache = ruby_system.network.master
+        l1_cntrl.responseToCache = ruby_system.network.master
+
+
     phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
     assert(phys_mem_size % options.num_dirs == 0)
     mem_module_size = phys_mem_size / options.num_dirs
@@ -198,6 +208,17 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
         dir_cntrl_nodes.append(dir_cntrl)
 
+        # Connect the directory controller to the network
+        dir_cntrl.forwardFromDir = ruby_system.network.slave
+        dir_cntrl.responseFromDir = ruby_system.network.slave
+        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+
+        dir_cntrl.unblockToDir = ruby_system.network.master
+        dir_cntrl.responseToDir = ruby_system.network.master
+        dir_cntrl.requestToDir = ruby_system.network.master
+        dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+
     for i, dma_port in enumerate(dma_ports):
         #
         # Create the Ruby objects associated with the dma controller
@@ -217,7 +238,11 @@ def create_system(options, system, dma_ports, ruby_system):
         if options.recycle_latency:
             dma_cntrl.recycle_latency = options.recycle_latency
 
+        # Connect the dma controller to the network
+        dma_cntrl.responseFromDir = ruby_system.network.slave
+        dma_cntrl.requestToDir = ruby_system.network.master
+
+
     all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
     topology = create_topology(all_cntrls, options)
-
     return (cpu_sequencers, dir_cntrl_nodes, topology)
index 553927bb7332196a93042e84ce31c13019732de8..7e4379c0de5feea54af8fb136bd689d4424345ea 100644 (file)
@@ -91,12 +91,16 @@ def create_system(options, system, dma_ports, ruby_system):
         l1_cntrl.sequencer = cpu_seq
         exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
 
-        #
         # Add controllers and sequencers to the appropriate lists
-        #
         cpu_sequencers.append(cpu_seq)
         l1_cntrl_nodes.append(l1_cntrl)
 
+        # Connect the L1 controllers and the network
+        l1_cntrl.requestFromCache =  ruby_system.network.slave
+        l1_cntrl.responseFromCache =  ruby_system.network.slave
+        l1_cntrl.forwardFromCache =  ruby_system.network.slave
+
+
     phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
     assert(phys_mem_size % options.num_dirs == 0)
     mem_module_size = phys_mem_size / options.num_dirs
@@ -114,6 +118,12 @@ def create_system(options, system, dma_ports, ruby_system):
         exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
         dir_cntrl_nodes.append(dir_cntrl)
 
+        # Connect the directory controllers and the network
+        dir_cntrl.requestToDir = ruby_system.network.master
+        dir_cntrl.forwardToDir = ruby_system.network.master
+        dir_cntrl.responseToDir = ruby_system.network.master
+
+
     all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes
     topology = create_topology(all_cntrls, options)
     return (cpu_sequencers, dir_cntrl_nodes, topology)
index d9517456bd566d263a6715dc9db8d41c3771f962..3c43fa6c6ab7b2583830352c954a5cc2bd02d152 100644 (file)
@@ -106,31 +106,7 @@ def create_system(options, system, piobus = None, dma_ports = []):
     system.ruby = RubySystem(no_mem_vec = options.use_map)
     ruby = system.ruby
 
-    protocol = buildEnv['PROTOCOL']
-    exec "import %s" % protocol
-    try:
-        (cpu_sequencers, dir_cntrls, topology) = \
-             eval("%s.create_system(options, system, dma_ports, ruby)"
-                  % protocol)
-    except:
-        print "Error: could not create sytem for ruby protocol %s" % protocol
-        raise
-
-    # Create a port proxy for connecting the system port. This is
-    # independent of the protocol and kept in the protocol-agnostic
-    # part (i.e. here).
-    sys_port_proxy = RubyPortProxy(ruby_system = ruby)
-    # Give the system port proxy a SimObject parent without creating a
-    # full-fledged controller
-    system.sys_port_proxy = sys_port_proxy
-
-    # Connect the system port for loading of binaries etc
-    system.system_port = system.sys_port_proxy.slave
-
-
-    #
     # Set the network classes based on the command line options
-    #
     if options.garnet_network == "fixed":
         NetworkClass = GarnetNetwork_d
         IntLinkClass = GarnetIntLink_d
@@ -152,10 +128,34 @@ def create_system(options, system, piobus = None, dma_ports = []):
         RouterClass = Switch
         InterfaceClass = None
 
+    # Instantiate the network object so that the controllers can connect to it.
+    network = NetworkClass(ruby_system = ruby, topology = options.topology,
+            routers = [], ext_links = [], int_links = [], netifs = [])
+    ruby.network = network
+
+    protocol = buildEnv['PROTOCOL']
+    exec "import %s" % protocol
+    try:
+        (cpu_sequencers, dir_cntrls, topology) = \
+             eval("%s.create_system(options, system, dma_ports, ruby)"
+                  % protocol)
+    except:
+        print "Error: could not create sytem for ruby protocol %s" % protocol
+        raise
+
+    # Create a port proxy for connecting the system port. This is
+    # independent of the protocol and kept in the protocol-agnostic
+    # part (i.e. here).
+    sys_port_proxy = RubyPortProxy(ruby_system = ruby)
+
+    # Give the system port proxy a SimObject parent without creating a
+    # full-fledged controller
+    system.sys_port_proxy = sys_port_proxy
+
+    # Connect the system port for loading of binaries etc
+    system.system_port = system.sys_port_proxy.slave
 
     # Create the network topology
-    network = NetworkClass(ruby_system = ruby, topology = topology.description,
-            routers = [], ext_links = [], int_links = [], netifs = [])
     topology.makeTopology(options, network, IntLinkClass, ExtLinkClass,
             RouterClass)
 
@@ -168,14 +168,12 @@ def create_system(options, system, piobus = None, dma_ports = []):
         network.enable_fault_model = True
         network.fault_model = FaultModel()
 
-    #
     # Loop through the directory controlers.
     # Determine the total memory size of the ruby system and verify it is equal
     # to physmem.  However, if Ruby memory is using sparse memory in SE
     # mode, then the system should not back-up the memory state with
     # the Memory Vector and thus the memory size bytes should stay at 0.
     # Also set the numa bits to the appropriate values.
-    #
     total_mem_size = MemorySize('0B')
 
     ruby.block_size_bytes = options.cacheline_size
@@ -196,8 +194,6 @@ def create_system(options, system, piobus = None, dma_ports = []):
 
     phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
     assert(total_mem_size.value == phys_mem_size)
-
-    ruby.network = network
     ruby.mem_size = total_mem_size
 
     # Connect the cpu sequencers and the piobus
index f707ba96338d6f9c4477cca0f0bc7bca15190a6f..49b6aa7a96bed62df46b479f1e10d472f43b8bc7 100644 (file)
@@ -33,14 +33,13 @@ machine(L0Cache, "MESI Directory L0 Cache")
    Cycles request_latency := 2;
    Cycles response_latency := 2;
    bool send_evictions;
-{
-  // NODE L0 CACHE
-  // From this node's L0 cache to the network
-  MessageBuffer bufferToL1, network="To", physical_network="0", ordered="true";
 
-  // To this node's L0 cache FROM the network
-  MessageBuffer bufferFromL1, network="From", physical_network="0", ordered="true";
+   // From this node's L0 cache to the network
+   MessageBuffer * bufferToL1, network="To", ordered="true";
 
+   // To this node's L0 cache FROM the network
+   MessageBuffer * bufferFromL1, network="From", ordered="true";
+{
   // Message queue between this controller and the processor
   MessageBuffer mandatoryQueue, ordered="false";
 
index 170599a51852242ee1f806ce863a2dbe500ef24f..59249d8229cd574cd32305451039e6584f4d05c9 100644 (file)
@@ -32,26 +32,30 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
    Cycles l1_request_latency := 2;
    Cycles l1_response_latency := 2;
    Cycles to_l2_latency := 1;
-{
-  // From this node's L1 cache TO the network
-  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
-  MessageBuffer requestToL2, network="To", virtual_network="0", ordered="false", vnet_type="request";
-  // a local L1 -> this L2 bank
-  MessageBuffer responseToL2, network="To", virtual_network="1", ordered="false", vnet_type="response";
-  MessageBuffer unblockToL2, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
-
-  // To this node's L1 cache FROM the network
-  // a L2 bank -> this L1
-  MessageBuffer requestFromL2, network="From", virtual_network="0", ordered="false", vnet_type="request";
-  // a L2 bank -> this L1
-  MessageBuffer responseFromL2, network="From", virtual_network="1", ordered="false", vnet_type="response";
-
-  // Message Buffers between the L1 and the L0 Cache
-  // From the L1 cache to the L0 cache
-  MessageBuffer bufferToL0, network="To", physical_network="0", ordered="true";
-  // From the L0 cache to the L1 cache
-  MessageBuffer bufferFromL0, network="From", physical_network="0", ordered="true";
 
+   // Message Buffers between the L1 and the L0 Cache
+   // From the L1 cache to the L0 cache
+   MessageBuffer * bufferToL0, network="To", ordered="true";
+
+   // From the L0 cache to the L1 cache
+   MessageBuffer * bufferFromL0, network="From", ordered="true";
+
+   // Message queue from this L1 cache TO the network / L2
+   MessageBuffer * requestToL2, network="To", virtual_network="0",
+        ordered="false", vnet_type="request";
+
+   MessageBuffer * responseToL2, network="To", virtual_network="1",
+        ordered="false", vnet_type="response";
+   MessageBuffer * unblockToL2, network="To", virtual_network="2",
+        ordered="false", vnet_type="unblock";
+
+   // To this L1 cache FROM the network / L2
+   MessageBuffer * requestFromL2, network="From", virtual_network="2",
+        ordered="false", vnet_type="request";
+   MessageBuffer * responseFromL2, network="From", virtual_network="1",
+        ordered="false", vnet_type="response";
+
+{
   // STATES
   state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
     // Base states
index 96c1699b73cd4d93ff5b2ec8b2173db7784228fe..6c98c23e98a13b5804fafc7d17c42cfc728f67bb 100644 (file)
@@ -37,25 +37,34 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
    Cycles to_l2_latency := 1;
    bool send_evictions;
    bool enable_prefetch := "False";
+
+   // Message Queues
+   // From this node's L1 cache TO the network
+
+   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+   MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
+        ordered="false", vnet_type="request";
+
+   // a local L1 -> this L2 bank
+   MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
+        ordered="false", vnet_type="response";
+
+   MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
+        ordered="false", vnet_type="unblock";
+
+
+   // To this node's L1 cache FROM the network
+   // a L2 bank -> this L1
+   MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
+        ordered="false", vnet_type="request";
+
+   // a L2 bank -> this L1
+   MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
+        ordered="false", vnet_type="response";
 {
-  // NODE L1 CACHE
-  // From this node's L1 cache TO the network
-  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
-  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
-  // a local L1 -> this L2 bank
-  MessageBuffer responseFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="response";
-  MessageBuffer unblockFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
-
-
-  // To this node's L1 cache FROM the network
-  // a L2 bank -> this L1
-  MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
-  // a L2 bank -> this L1
-  MessageBuffer responseToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="response";
   // Request Buffer for prefetches
   MessageBuffer optionalQueue, ordered="false";
 
-
   // STATES
   state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
     // Base states
index f191ddccbdf298444b85efaa5efa5d85a8558669..9e0522ea2e564dc0b714262c3aa894d08d37b020 100644 (file)
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*
- * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
- *
- */
-
 machine(L2Cache, "MESI Directory L2 Cache CMP")
  : CacheMemory * L2cache;
    Cycles l2_request_latency := 2;
    Cycles l2_response_latency := 2;
    Cycles to_l1_latency := 1;
-{
-  // L2 BANK QUEUES
+
+  // Message Queues
   // From local bank of L2 cache TO the network
-  MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0",
+  MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0",
     ordered="false", vnet_type="request";  // this L2 bank -> Memory
-  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0",
+
+  MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="2",
     ordered="false", vnet_type="request";  // this L2 bank -> a local L1
-  MessageBuffer responseFromL2Cache, network="To", virtual_network="1",
+
+  MessageBuffer * responseFromL2Cache, network="To", virtual_network="1",
     ordered="false", vnet_type="response";  // this L2 bank -> a local L1 || Memory
 
   // FROM the network to this local bank of L2 cache
-  MessageBuffer unblockToL2Cache, network="From", virtual_network="2",
+  MessageBuffer unblockToL2Cache, network="From", virtual_network="2",
     ordered="false", vnet_type="unblock";  // a local L1 || Memory -> this L2 bank
-  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0",
+
+  MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
     ordered="false", vnet_type="request";  // a local L1 -> this L2 bank
-  MessageBuffer responseToL2Cache, network="From", virtual_network="1",
-    ordered="false", vnet_type="response";  // a local L1 || Memory -> this L2 bank
 
+  MessageBuffer * responseToL2Cache, network="From", virtual_network="1",
+    ordered="false", vnet_type="response";  // a local L1 || Memory -> this L2 bank
+{
   // STATES
   state_declaration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
     // Base states
index 679f2dee75afd336f3f679d1c567efd4c1138b47..dd0ecf49e00e3450b57bbaa3d3d77e905cdbcdad 100644 (file)
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*
- * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
- */
-
-// This file is copied from Yasuko Watanabe's prefetch / memory protocol
-// Copied here by aep 12/14/07
-
-
 machine(Directory, "MESI Two Level directory protocol")
  : DirectoryMemory * directory;
    MemoryControl * memBuffer;
    Cycles to_mem_ctrl_latency := 1;
    Cycles directory_latency := 6;
-{
-  MessageBuffer requestToDir, network="From", virtual_network="0",
+
+   MessageBuffer * requestToDir, network="From", virtual_network="0",
         ordered="false", vnet_type="request";
-  MessageBuffer responseToDir, network="From", virtual_network="1",
+   MessageBuffer * responseToDir, network="From", virtual_network="1",
         ordered="false", vnet_type="response";
-  MessageBuffer responseFromDir, network="To", virtual_network="1",
+   MessageBuffer * responseFromDir, network="To", virtual_network="1",
         ordered="false", vnet_type="response";
-
+{
   // STATES
   state_declaration(State, desc="Directory states", default="Directory_State_I") {
     // Base states
index 80c70c80ac8ce7e95e2a7247b001294798784510..e318326202dc864946c24b5283b23ead2cb473ea 100644 (file)
 machine(DMA, "DMA Controller")
 : DMASequencer * dma_sequencer;
   Cycles request_latency := 6;
-{
-
-  MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
-  MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
 
+  MessageBuffer * responseFromDir, network="From", virtual_network="1",
+        ordered="true", vnet_type="response";
+  MessageBuffer * requestToDir, network="To", virtual_network="0",
+        ordered="false", vnet_type="request";
+{
   state_declaration(State, desc="DMA states", default="DMA_State_READY") {
     READY, AccessPermission:Invalid, desc="Ready to accept a new request";
     BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
@@ -74,7 +75,7 @@ machine(DMA, "DMA Controller")
     error("DMA does not support get data block.");
   }
 
-  out_port(reqToDirectory_out, RequestMsg, reqToDirectory, desc="...");
+  out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
 
   in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
     if (dmaRequestQueue_in.isReady()) {
@@ -106,7 +107,7 @@ machine(DMA, "DMA Controller")
 
   action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
     peek(dmaRequestQueue_in, SequencerMsg) {
-      enqueue(reqToDirectory_out, RequestMsg, request_latency) {
+      enqueue(requestToDir_out, RequestMsg, request_latency) {
         out_msg.Addr := in_msg.PhysicalAddress;
         out_msg.Type := CoherenceRequestType:DMA_READ;
         out_msg.DataBlk := in_msg.DataBlk;
@@ -119,7 +120,7 @@ machine(DMA, "DMA Controller")
 
   action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
     peek(dmaRequestQueue_in, SequencerMsg) {
-      enqueue(reqToDirectory_out, RequestMsg, request_latency) {
+      enqueue(requestToDir_out, RequestMsg, request_latency) {
           out_msg.Addr := in_msg.PhysicalAddress;
           out_msg.Type := CoherenceRequestType:DMA_WRITE;
           out_msg.DataBlk := in_msg.DataBlk;
index 9b0c18bc82e7618f464b07c55eb8f19725f38c7d..ee774f4c24bdfec3e7455eca45d08a154bd143e3 100644 (file)
  */
 
 machine(L1Cache, "MI Example L1 Cache")
-: Sequencer * sequencer;
-  CacheMemory * cacheMemory;
-  Cycles cache_response_latency := 12;
-  Cycles issue_latency := 2;
-  bool send_evictions;
+    : Sequencer * sequencer;
+      CacheMemory * cacheMemory;
+      Cycles cache_response_latency := 12;
+      Cycles issue_latency := 2;
+      bool send_evictions;
+
+      // NETWORK BUFFERS
+      MessageBuffer * requestFromCache, network="To", virtual_network="2",
+            ordered="true", vnet_type="request";
+      MessageBuffer * responseFromCache, network="To", virtual_network="4",
+            ordered="true", vnet_type="response";
+
+      MessageBuffer * forwardToCache, network="From", virtual_network="3",
+            ordered="true", vnet_type="forward";
+      MessageBuffer * responseToCache, network="From", virtual_network="4",
+            ordered="true", vnet_type="response";
 {
-
-  // NETWORK BUFFERS
-  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
-  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";
-
-  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
-  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";
-
   // STATES
   state_declaration(State, desc="Cache states") {
     I, AccessPermission:Invalid, desc="Not Present/Invalid";
index f0d85cba8d95bc29529585ca2a3a92696f2233ed..cd12e3eb7f70269826adee922fd8c674dea78003 100644 (file)
  */
 
 machine(Directory, "Directory protocol") 
-: DirectoryMemory * directory;
-  MemoryControl * memBuffer;
-  Cycles directory_latency := 12;
+    : DirectoryMemory * directory;
+      MemoryControl * memBuffer;
+      Cycles directory_latency := 12;
+
+      MessageBuffer * forwardFromDir, network="To", virtual_network="3",
+            ordered="false", vnet_type="forward";
+      MessageBuffer * responseFromDir, network="To", virtual_network="4",
+            ordered="false", vnet_type="response";
+      MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
+            ordered="true", vnet_type="response";
+
+      MessageBuffer * requestToDir, network="From", virtual_network="2",
+            ordered="true", vnet_type="request";
+      MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+            ordered="true", vnet_type="request";
 {
-
-  MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
-  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
-  MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
-
-  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true", vnet_type="request";
-  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
-
   // STATES
   state_declaration(State, desc="Directory states", default="Directory_State_I") {
     // Base states
index 14b8c4e4a274b20a95f3a64fea4944413d68dbc3..e328d9e202b76e3891e353987904055426b25690 100644 (file)
  */
 
 machine(DMA, "DMA Controller") 
-: DMASequencer * dma_sequencer;
-  Cycles request_latency := 6;
-{
-
-  MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
-  MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+    : DMASequencer * dma_sequencer;
+      Cycles request_latency := 6;
 
+      MessageBuffer * responseFromDir, network="From", virtual_network="1",
+            ordered="true", vnet_type="response";
+      MessageBuffer * requestToDir, network="To", virtual_network="0",
+            ordered="false", vnet_type="request";
+{
   state_declaration(State, desc="DMA states", default="DMA_State_READY") {
     READY, AccessPermission:Invalid, desc="Ready to accept a new request";
     BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
@@ -69,7 +70,7 @@ machine(DMA, "DMA Controller")
     error("DMA Controller does not support getDataBlock function.\n");
   }
 
-  out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
+  out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
 
   in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
     if (dmaRequestQueue_in.isReady()) {
@@ -101,7 +102,7 @@ machine(DMA, "DMA Controller")
 
   action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
     peek(dmaRequestQueue_in, SequencerMsg) {
-      enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+      enqueue(requestToDir_out, DMARequestMsg, request_latency) {
         out_msg.PhysicalAddress := in_msg.PhysicalAddress;
         out_msg.LineAddress := in_msg.LineAddress; 
         out_msg.Type := DMARequestType:READ;
@@ -116,7 +117,7 @@ machine(DMA, "DMA Controller")
 
   action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
     peek(dmaRequestQueue_in, SequencerMsg) {
-      enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+      enqueue(requestToDir_out, DMARequestMsg, request_latency) {
           out_msg.PhysicalAddress := in_msg.PhysicalAddress;
           out_msg.LineAddress := in_msg.LineAddress; 
           out_msg.Type := DMARequestType:WRITE;
index fb74a67e4dce3204aae9fc95221ab184257cc01a..3cd87616f9a1c7ad7b1176778b438d25d287a182 100644 (file)
@@ -34,25 +34,24 @@ machine(L1Cache, "Directory protocol")
    Cycles request_latency := 2;
    Cycles use_timeout_latency := 50;
    bool send_evictions;
-{
-
-  // NODE L1 CACHE
-  // From this node's L1 cache TO the network
-  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
-  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
-  // a local L1 -> this L2 bank
-  MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";
-//  MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false", vnet_type="writeback";
-
-
-  // To this node's L1 cache FROM the network
-  // a L2 bank -> this L1
-  MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
-  // a L2 bank -> this L1
-  MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";
-
-
 
+   // Message Queues
+   // From this node's L1 cache TO the network
+   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+   MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
+        ordered="false", vnet_type="request";
+   // a local L1 -> this L2 bank
+   MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
+        ordered="false", vnet_type="response";
+
+   // To this node's L1 cache FROM the network
+   // a L2 bank -> this L1
+   MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
+        ordered="false", vnet_type="request";
+   // a L2 bank -> this L1
+   MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
+        ordered="false", vnet_type="response";
+{
   // STATES
   state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
     // Base states
index 7d81f41644c277396f4deaa5bec6c9858d123c0a..46fd12a3a089a4795b88db714d1f57616e967d94 100644 (file)
@@ -30,20 +30,25 @@ machine(L2Cache, "Token protocol")
 : CacheMemory * L2cache;
   Cycles response_latency := 2;
   Cycles request_latency := 2;
-{
 
   // L2 BANK QUEUES
   // From local bank of L2 cache TO the network
-  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";  // this L2 bank -> a local L1
-  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";  // this L2 bank -> mod-directory
-  MessageBuffer responseFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";  // this L2 bank -> a local L1 || mod-directory
+  MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="0",
+        ordered="false", vnet_type="request";  // this L2 bank -> a local L1
+  MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="1",
+        ordered="false", vnet_type="request";  // this L2 bank -> mod-directory
+  MessageBuffer * responseFromL2Cache, network="To", virtual_network="2",
+        ordered="false", vnet_type="response";  // this L2 bank -> a local L1 || mod-directory
 
   // FROM the network to this local bank of L2 cache
-  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";  // a local L1 -> this L2 bank, Lets try this???
-  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";  // mod-directory -> this L2 bank
-  MessageBuffer responseToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";  // a local L1 || mod-directory -> this L2 bank
-//  MessageBuffer L1WritebackToL2Cache, network="From", virtual_network="3", ordered="false", vnet_type="writeback";
+  MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
+        ordered="false", vnet_type="request";  // a local L1 -> this L2 bank, Lets try this???
+  MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="1",
+        ordered="false", vnet_type="request";  // mod-directory -> this L2 bank
+  MessageBuffer * responseToL2Cache, network="From", virtual_network="2",
+        ordered="false", vnet_type="response";  // a local L1 || mod-directory -> this L2 bank
 
+{
   // STATES
   state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
 
index b403bc91c6f1be8749cea0745e69bc5c1d9cbef0..272a8c9abb135b56eccfa743563f12b6892038d5 100644 (file)
@@ -30,16 +30,19 @@ machine(Directory, "Directory protocol")
 :  DirectoryMemory * directory;
    MemoryControl * memBuffer;
    Cycles directory_latency := 6;
-{
-
-  // ** IN QUEUES **
-  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false", vnet_type="request";  // a mod-L2 bank -> this Dir
-  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type="response";  // a mod-L2 bank -> this Dir
 
-  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false", vnet_type="forward";
-  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false", vnet_type="response";  // Dir -> mod-L2 bank
+   // Message Queues
+   MessageBuffer * requestToDir, network="From", virtual_network="1",
+        ordered="false", vnet_type="request";  // a mod-L2 bank -> this Dir
+   MessageBuffer * responseToDir, network="From", virtual_network="2",
+        ordered="false", vnet_type="response";  // a mod-L2 bank -> this Dir
 
+   MessageBuffer * forwardFromDir, network="To", virtual_network="1",
+        ordered="false", vnet_type="forward";
+   MessageBuffer * responseFromDir, network="To", virtual_network="2",
+        ordered="false", vnet_type="response";  // Dir -> mod-L2 bank
 
+{
   // STATES
   state_declaration(State, desc="Directory states", default="Directory_State_I") {
     // Base states
index 1a8b3aea94732600171de8e94636eb664363d4ca..767a51a1f447fc0a20732cb22662bc0824e69526 100644 (file)
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-machine(DMA, "DMA Controller") 
-: DMASequencer * dma_sequencer;
-  Cycles request_latency := 14;
-  Cycles response_latency := 14;
-{
-  MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";
+machine(DMA, "DMA Controller")
+    : DMASequencer * dma_sequencer;
+      Cycles request_latency := 14;
+      Cycles response_latency := 14;
+
+      MessageBuffer * responseFromDir, network="From", virtual_network="2",
+            ordered="false", vnet_type="response";
+
+      MessageBuffer * reqToDir, network="To", virtual_network="1",
+            ordered="false", vnet_type="request";
+      MessageBuffer * respToDir, network="To", virtual_network="2",
+            ordered="false", vnet_type="dmaresponse";
 
-  MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
-  MessageBuffer respToDir, network="To", virtual_network="2", ordered="false", vnet_type="dmaresponse";
-  
+{
   state_declaration(State, desc="DMA states", default="DMA_State_READY") {
     READY, AccessPermission:Invalid, desc="Ready to accept a new request";
     BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
     BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
   }
-  
+
   enumeration(Event, desc="DMA events") {
     ReadRequest,  desc="A new read request";
     WriteRequest, desc="A new write request";
@@ -293,7 +297,7 @@ machine(DMA, "DMA Controller")
   }
 
   transition(BUSY_WR, All_Acks, READY) {
-    a_ackCallback; 
+    a_ackCallback;
     u_sendExclusiveUnblockToDir;
     w_deallocateTBE;
     p_popTriggerQueue;
index b1197780f718dee9f4c3b8c98e2fe7c0ee6e247c..86074438470bf47881bee09544e8d52918b81176 100644 (file)
@@ -48,24 +48,32 @@ machine(L1Cache, "Token protocol")
    bool dynamic_timeout_enabled := "True";
    bool no_mig_atomic := "True";
    bool send_evictions;
-{
-
-  // From this node's L1 cache TO the network
-
-  // a local L1 -> this L2 bank
-  MessageBuffer responseFromL1Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
-  MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
-  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
-  MessageBuffer requestFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
 
+   // Message Queues
+   // From this node's L1 cache TO the network
+   // a local L1 -> this L2 bank
+   MessageBuffer * responseFromL1Cache, network="To", virtual_network="4",
+        ordered="false", vnet_type="response";
+   MessageBuffer * persistentFromL1Cache, network="To", virtual_network="3",
+        ordered="true", vnet_type="persistent";
+   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+   MessageBuffer * requestFromL1Cache, network="To", virtual_network="1",
+        ordered="false", vnet_type="request";
+   // To this node's L1 cache FROM the network
+
+   // a L2 bank -> this L1
+   MessageBuffer * responseToL1Cache, network="From", virtual_network="4",
+        ordered="false", vnet_type="response";
+   MessageBuffer * persistentToL1Cache, network="From", virtual_network="3",
+        ordered="true", vnet_type="persistent";
+   // a L2 bank -> this L1
+   MessageBuffer * requestToL1Cache, network="From", virtual_network="1",
+        ordered="false", vnet_type="request";
 
-  // To this node's L1 cache FROM the network
-  // a L2 bank -> this L1
-  MessageBuffer responseToL1Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
-  MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
-  // a L2 bank -> this L1
-  MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
-
+{
   // STATES
   state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
     // Base states
index f8bd0169595885e7cb0194b30c83b7f9caa35cf0..a2488066a4c851c55c3806ae47f4751010268fec 100644 (file)
@@ -32,29 +32,36 @@ machine(L2Cache, "Token protocol")
    Cycles l2_request_latency := 5;
    Cycles l2_response_latency := 5;
    bool filtering_enabled := "True";
-{
-
-  // L2 BANK QUEUES
-  // From local bank of L2 cache TO the network
-
-  // this L2 bank -> a local L1 || mod-directory
-  MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
-  // this L2 bank -> mod-directory
-  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="request";
-  // this L2 bank -> a local L1
-  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
 
+   // L2 BANK QUEUES
+   // From local bank of L2 cache TO the network
+   // this L2 bank -> a local L1 || mod-directory
+   MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
+        ordered="false", vnet_type="response";
+   // this L2 bank -> mod-directory
+   MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
+        ordered="false", vnet_type="request";
+   // this L2 bank -> a local L1
+   MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
+        ordered="false", vnet_type="request";
+   // FROM the network to this local bank of L2 cache
+   // a local L1 || mod-directory -> this L2 bank
+   MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
+        ordered="false", vnet_type="response";
+   MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
+        ordered="true", vnet_type="persistent";
+   // mod-directory -> this L2 bank
+   MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
+        ordered="false", vnet_type="request";
+   // a local L1 -> this L2 bank
+   MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
+        ordered="false", vnet_type="request";
 
-  // FROM the network to this local bank of L2 cache
-
-  // a local L1 || mod-directory -> this L2 bank
-  MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
-  MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
-  // mod-directory -> this L2 bank
-  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="request";
-  // a local L1 -> this L2 bank
-  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
-
+{
   // STATES
   state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
     // Base states
index 5cb29fcc241920e1ca2706419380d6bdb2d1482a..be5df02e064e9519a268b7df622e069ef5858a9f 100644 (file)
@@ -34,18 +34,34 @@ machine(Directory, "Token protocol")
    bool distributed_persistent := "True";
    Cycles fixed_timeout_latency := 100;
    Cycles reissue_wakeup_latency := 10;
-{
 
-  MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true", vnet_type="response";
-  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
-  MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
-  MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
+   // Message Queues from dir to other controllers / network
+   MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
+        ordered="true", vnet_type="response";
+
+   MessageBuffer * responseFromDir, network="To", virtual_network="4",
+        ordered="false", vnet_type="response";
+
+   MessageBuffer * persistentFromDir, network="To", virtual_network="3",
+        ordered="true", vnet_type="persistent";
 
-  MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
-  MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
-  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request";
-  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
+   MessageBuffer * requestFromDir, network="To", virtual_network="1",
+        ordered="false", vnet_type="request";
+   // Message Queues to dir from other controllers / network
+   MessageBuffer * responseToDir, network="From", virtual_network="4",
+        ordered="false", vnet_type="response";
 
+   MessageBuffer * persistentToDir, network="From", virtual_network="3",
+        ordered="true", vnet_type="persistent";
+
+   MessageBuffer * requestToDir, network="From", virtual_network="2",
+        ordered="false", vnet_type="request";
+
+   MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+        ordered="true", vnet_type="request";
+
+{
   // STATES
   state_declaration(State, desc="Directory states", default="Directory_State_O") {
     // Base states
index 441a001fc58104607589cf42981fd32f4744249a..72b0e52a5b75f00f713a4a324967d0157800d92a 100644 (file)
 
 
 machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
-  Cycles request_latency := 6;
-{
+    : DMASequencer * dma_sequencer;
+      Cycles request_latency := 6;
 
-  MessageBuffer responseFromDir, network="From", virtual_network="5", ordered="true", vnet_type="response";
-  MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+      // Message Queues
+      MessageBuffer * responseFromDir, network="From", virtual_network="5",
+            ordered="true", vnet_type="response";
+      MessageBuffer * reqToDirectory, network="To", virtual_network="0",
+            ordered="false", vnet_type="request";
 
+{
   state_declaration(State, desc="DMA states", default="DMA_State_READY") {
     READY, AccessPermission:Invalid, desc="Ready to accept a new request";
     BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
index 7c150bda0ff57218718024dbdcf9b296443c85b7..de502e118599f57a0919be2757e8f8619099e293 100644 (file)
  */
 
 machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
-: Sequencer * sequencer;
-  CacheMemory * L1Icache;
-  CacheMemory * L1Dcache;
-  CacheMemory * L2cache;
-  Cycles cache_response_latency := 10;
-  Cycles issue_latency := 2;
-  Cycles l2_cache_hit_latency := 10;
-  bool no_mig_atomic := "True";
-  bool send_evictions;
+    : Sequencer * sequencer;
+      CacheMemory * L1Icache;
+      CacheMemory * L1Dcache;
+      CacheMemory * L2cache;
+      Cycles cache_response_latency := 10;
+      Cycles issue_latency := 2;
+      Cycles l2_cache_hit_latency := 10;
+      bool no_mig_atomic := "True";
+      bool send_evictions;
+
+      // NETWORK BUFFERS
+      MessageBuffer * requestFromCache, network="To", virtual_network="2",
+            ordered="false", vnet_type="request";
+      MessageBuffer * responseFromCache, network="To", virtual_network="4",
+            ordered="false", vnet_type="response";
+      MessageBuffer * unblockFromCache, network="To", virtual_network="5",
+            ordered="false", vnet_type="unblock";
+
+      MessageBuffer * forwardToCache, network="From", virtual_network="3",
+            ordered="false", vnet_type="forward";
+      MessageBuffer * responseToCache, network="From", virtual_network="4",
+            ordered="false", vnet_type="response";
 {
-
-  // NETWORK BUFFERS
-  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
-  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
-  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";
-
-  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
-  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";
-
-
   // STATES
   state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
     // Base states
index 4e2f846e26178bde8bd80e9beb8bd047ef79de92..db11b290f7f223942891f42a7c07d9d0ce9b5dda 100644 (file)
  */
 
 machine(Directory, "AMD Hammer-like protocol") 
-: DirectoryMemory * directory;
-  CacheMemory * probeFilter;
-  MemoryControl * memBuffer;
-  Cycles memory_controller_latency := 2;
-  bool probe_filter_enabled := "False";
-  bool full_bit_dir_enabled := "False";
-{
+    : DirectoryMemory * directory;
+      CacheMemory * probeFilter;
+      MemoryControl * memBuffer;
+      Cycles memory_controller_latency := 2;
+      bool probe_filter_enabled := "False";
+      bool full_bit_dir_enabled := "False";
 
-  MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
-  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
-  //
-  // For a finite buffered network, note that the DMA response network only 
-  // works at this relatively lower numbered (lower priority) virtual network
-  // because the trigger queue decouples cache responses from DMA responses.
-  //
-  MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
+      MessageBuffer * forwardFromDir, network="To", virtual_network="3",
+            ordered="false", vnet_type="forward";
+
+      MessageBuffer * responseFromDir, network="To", virtual_network="4",
+            ordered="false", vnet_type="response";
 
-  MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false", vnet_type="unblock";
-  MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
-  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request", recycle_latency="1";
-  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
+      // For a finite buffered network, note that the DMA response network only
+      // works at this relatively lower numbered (lower priority) virtual network
+      // because the trigger queue decouples cache responses from DMA responses.
+      MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
+            ordered="true", vnet_type="response";
 
+      MessageBuffer * unblockToDir, network="From", virtual_network="5",
+            ordered="false", vnet_type="unblock";
+
+      MessageBuffer * responseToDir, network="From", virtual_network="4",
+            ordered="false", vnet_type="response";
+
+      MessageBuffer * requestToDir, network="From", virtual_network="2",
+            ordered="false", vnet_type="request", recycle_latency="1";
+
+      MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+            ordered="true", vnet_type="request";
+{
   // STATES
   state_declaration(State, desc="Directory states", default="Directory_State_E") {
     // Base states
index e4d26bb48be08251b84fdaa1b23e7b4d46b59c09..ab41adb4df91a5be556f58b487f1f9202b9b9643 100644 (file)
 
 
 machine(DMA, "DMA Controller") 
-: DMASequencer * dma_sequencer;
-  Cycles request_latency := 6;
-{
-
-  MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
-  MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+    : DMASequencer * dma_sequencer;
+      Cycles request_latency := 6;
 
-  state_declaration(State, 
-                    desc="DMA states", 
-                    default="DMA_State_READY") {
+      MessageBuffer * responseFromDir, network="From", virtual_network="1",
+            ordered="true", vnet_type="response";
+      MessageBuffer * requestToDir, network="To", virtual_network="0",
+            ordered="false", vnet_type="request";
+{
+  state_declaration(State, desc="DMA states", default="DMA_State_READY") {
     READY, AccessPermission:Invalid, desc="Ready to accept a new request";
     BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
     BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
@@ -71,7 +70,7 @@ machine(DMA, "DMA Controller")
     error("DMA Controller does not support getDataBlock function.\n");
   }
 
-  out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
+  out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
 
   in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
     if (dmaRequestQueue_in.isReady()) {
@@ -103,7 +102,7 @@ machine(DMA, "DMA Controller")
 
   action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
     peek(dmaRequestQueue_in, SequencerMsg) {
-      enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+      enqueue(requestToDir_out, DMARequestMsg, request_latency) {
         out_msg.PhysicalAddress := in_msg.PhysicalAddress;
         out_msg.LineAddress := in_msg.LineAddress; 
         out_msg.Type := DMARequestType:READ;
@@ -118,7 +117,7 @@ machine(DMA, "DMA Controller")
 
   action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
     peek(dmaRequestQueue_in, SequencerMsg) {
-      enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+      enqueue(requestToDir_out, DMARequestMsg, request_latency) {
           out_msg.PhysicalAddress := in_msg.PhysicalAddress;
           out_msg.LineAddress := in_msg.LineAddress; 
           out_msg.Type := DMARequestType:WRITE;
index f69aecd933330407be8196d3ba3695988f327dfe..e0307152d2c83119a176b3583ac594388b9d343c 100644 (file)
 
 
 machine(L1Cache, "Network_test L1 Cache")
-: Sequencer * sequencer;
-  Cycles issue_latency := 2;
+    : Sequencer * sequencer;
+      Cycles issue_latency := 2;
+
+      // NETWORK BUFFERS
+      MessageBuffer * requestFromCache, network="To", virtual_network="0",
+            ordered="false", vnet_type = "request";
+      MessageBuffer * forwardFromCache, network="To", virtual_network="1",
+            ordered="false", vnet_type = "forward";
+      MessageBuffer * responseFromCache, network="To", virtual_network="2",
+            ordered="false", vnet_type = "response";
 {
-
-  // NETWORK BUFFERS
-  MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false", vnet_type = "request";
-  MessageBuffer forwardFromCache, network="To", virtual_network="1", ordered="false", vnet_type = "forward";
-  MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false", vnet_type = "response";
-
   // STATES
   state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
     I,  AccessPermission:Invalid, desc="Not Present/Invalid";
index 47e248dffdf3548686847cdd69499d16b3b211b7..4d6472c542b08c18c634571a17789e06ce448789 100644 (file)
 
 
 machine(Directory, "Network_test Directory")
-:
+    : MessageBuffer * requestToDir, network="From", virtual_network="0",
+            ordered="false", vnet_type = "request";
+      MessageBuffer * forwardToDir, network="From", virtual_network="1",
+            ordered="false", vnet_type = "forward";
+      MessageBuffer * responseToDir, network="From", virtual_network="2",
+            ordered="false", vnet_type = "response";
 {
-
-  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type = "request";
-  MessageBuffer forwardToDir, network="From", virtual_network="1", ordered="false", vnet_type = "forward";
-  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type = "response";
-
   // STATES
   state_declaration(State, desc="Directory states", default="Directory_State_I") {
     // Base states
index 60531a423103b61ff0f77cae43fe0a7984747598..2d9376b082417dc6503cd7294f81110cbfad2296 100644 (file)
@@ -57,19 +57,6 @@ Network::Network(const Params *p)
     // Queues that are feeding the protocol
     m_fromNetQueues.resize(m_nodes);
 
-    for (int node = 0; node < m_nodes; node++) {
-        // Setting number of virtual message buffers per Network Queue
-        m_toNetQueues[node].resize(m_virtual_networks);
-        m_fromNetQueues[node].resize(m_virtual_networks);
-
-        // Instantiating the Message Buffers that
-        // interact with the coherence protocol
-        for (int j = 0; j < m_virtual_networks; j++) {
-            m_toNetQueues[node][j] = new MessageBuffer();
-            m_fromNetQueues[node][j] = new MessageBuffer();
-        }
-    }
-
     m_in_use.resize(m_virtual_networks);
     m_ordered.resize(m_virtual_networks);
 
@@ -95,10 +82,14 @@ Network::Network(const Params *p)
 Network::~Network()
 {
     for (int node = 0; node < m_nodes; node++) {
+
         // Delete the Message Buffers
-        for (int j = 0; j < m_virtual_networks; j++) {
-            delete m_toNetQueues[node][j];
-            delete m_fromNetQueues[node][j];
+        for (auto& it : m_toNetQueues[node]) {
+            delete it.second;
+        }
+
+        for (auto& it : m_fromNetQueues[node]) {
+            delete it.second;
         }
     }
 
index dcdd791e7d626acb5e0a161799e3b9695268038a..d595ca2850e7db75be0868217498e319abfaa8e8 100644 (file)
@@ -72,11 +72,10 @@ class Network : public ClockedObject
     static uint32_t MessageSizeType_to_int(MessageSizeType size_type);
 
     // returns the queue requested for the given component
-    virtual MessageBuffer* getToNetQueue(NodeID id, bool ordered,
-        int netNumber, std::string vnet_type) = 0;
-    virtual MessageBuffer* getFromNetQueue(NodeID id, bool ordered,
-        int netNumber, std::string vnet_type) = 0;
-
+    virtual void setToNetQueue(NodeID id, bool ordered, int netNumber,
+                               std::string vnet_type, MessageBuffer *b) = 0;
+    virtual void setFromNetQueue(NodeID id, bool ordered, int netNumber,
+                                 std::string vnet_type, MessageBuffer *b) = 0;
 
     virtual void makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
                              LinkDirection direction,
@@ -113,8 +112,8 @@ class Network : public ClockedObject
     static uint32_t m_data_msg_size;
 
     // vector of queues from the components
-    std::vector<std::vector<MessageBuffer*> > m_toNetQueues;
-    std::vector<std::vector<MessageBuffer*> > m_fromNetQueues;
+    std::vector<std::map<int, MessageBuffer*> > m_toNetQueues;
+    std::vector<std::map<int, MessageBuffer*> > m_fromNetQueues;
 
     std::vector<bool> m_in_use;
     std::vector<bool> m_ordered;
index 4f33dd196b21c5b74e8438bbcff68ab68f94be4b..8cc38f26fb642bb6f88b5e4b892c0263b86e2aee 100644 (file)
@@ -28,7 +28,6 @@
 #          Brad Beckmann
 
 from m5.params import *
-from m5.SimObject import SimObject
 from ClockedObject import ClockedObject
 from BasicLink import BasicLink
 
@@ -48,3 +47,6 @@ class RubyNetwork(ClockedObject):
     netifs = VectorParam.ClockedObject("Network Interfaces")
     ext_links = VectorParam.BasicExtLink("Links to external nodes")
     int_links = VectorParam.BasicIntLink("Links between internal nodes")
+
+    slave = VectorSlavePort("CPU slave port")
+    master = VectorMasterPort("CPU master port")
index 92e45c36db8e605cc7c496ce63926aedaaf7bb91..2aeddad37ae911da1d7ad70d5b0b0b1687d7bcdc 100644 (file)
@@ -66,20 +66,20 @@ BaseGarnetNetwork::init()
     Network::init();
 }
 
-MessageBuffer*
-BaseGarnetNetwork::getToNetQueue(NodeID id, bool ordered, int network_num,
-                                 string vnet_type)
+void
+BaseGarnetNetwork::setToNetQueue(NodeID id, bool ordered, int network_num,
+                                 string vnet_type, MessageBuffer *b)
 {
     checkNetworkAllocation(id, ordered, network_num, vnet_type);
-    return m_toNetQueues[id][network_num];
+    m_toNetQueues[id][network_num] = b;
 }
 
-MessageBuffer*
-BaseGarnetNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,  
-                                   string vnet_type)
+void
+BaseGarnetNetwork::setFromNetQueue(NodeID id, bool ordered, int network_num,
+                                   string vnet_type, MessageBuffer *b)
 {
     checkNetworkAllocation(id, ordered, network_num, vnet_type);
-    return m_fromNetQueues[id][network_num];
+    m_fromNetQueues[id][network_num] = b;
 }
 
 void
index c4bb9f5b184fbe0428a01eba0d7610fba52a448a..cc1d4d9299433835031af3023ec67c1a7879943e 100644 (file)
@@ -68,12 +68,11 @@ class BaseGarnetNetwork : public Network
         m_queueing_latency[vnet] += latency;
     }
 
-    // returns the queue requested for the given component
-    MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num,
-                                 std::string vnet_type);
-    MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num,
-                                   std::string vnet_type);
-
+    // set the queue
+    void setToNetQueue(NodeID id, bool ordered, int network_num,
+                       std::string vnet_type, MessageBuffer *b);
+    void setFromNetQueue(NodeID id, bool ordered, int network_num,
+                         std::string vnet_type, MessageBuffer *b);
 
     bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
     bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
index 2f1b5ee461284ed8622bee3832ef0ab0ea2794a6..7384cc6a7296b914c747f79b4ce880afe69d1139 100644 (file)
@@ -53,8 +53,6 @@ NetworkInterface_d::NetworkInterface_d(const Params *p)
     m_vc_round_robin = 0;
     m_ni_buffers.resize(m_num_vcs);
     m_ni_enqueue_time.resize(m_num_vcs);
-    inNode_ptr.resize(m_virtual_networks);
-    outNode_ptr.resize(m_virtual_networks);
     creditQueue = new flitBuffer_d();
 
     // instantiating the NI flit buffers
@@ -108,18 +106,20 @@ NetworkInterface_d::addOutPort(NetworkLink_d *out_link,
 }
 
 void
-NetworkInterface_d::addNode(vector<MessageBuffer *>& in,
-                            vector<MessageBuffer *>& out)
+NetworkInterface_d::addNode(map<int, MessageBuffer *>& in,
+                            map<int, MessageBuffer *>& out)
 {
-    assert(in.size() == m_virtual_networks);
     inNode_ptr = in;
     outNode_ptr = out;
 
-    for (int j = 0; j < m_virtual_networks; j++) {
+    for (auto& it : in) {
         // the protocol injects messages into the NI
-        inNode_ptr[j]->setConsumer(this);
-        inNode_ptr[j]->setReceiver(this);
-        outNode_ptr[j]->setSender(this);
+        it.second->setConsumer(this);
+        it.second->setReceiver(this);
+    }
+
+    for (auto& it : out) {
+        it.second->setSender(this);
     }
 }
 
@@ -223,11 +223,14 @@ NetworkInterface_d::wakeup()
 
     // Checking for messages coming from the protocol
     // can pick up a message/cycle for each virtual net
-    for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
-        while (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
-            msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
+    for (auto it = inNode_ptr.begin(); it != inNode_ptr.end(); ++it) {
+        int vnet = (*it).first;
+        MessageBuffer *b = (*it).second;
+
+        while (b->isReady()) { // Is there a message waiting
+            msg_ptr = b->peekMsgPtr();
             if (flitisizeMessage(msg_ptr, vnet)) {
-                inNode_ptr[vnet]->dequeue();
+                b->dequeue();
             } else {
                 break;
             }
@@ -351,12 +354,15 @@ NetworkInterface_d::get_vnet(int vc)
 void
 NetworkInterface_d::checkReschedule()
 {
-    for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
-        if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
+    for (const auto& it : inNode_ptr) {
+        MessageBuffer *b = it.second;
+
+        while (b->isReady()) { // Is there a message waiting
             scheduleEvent(Cycles(1));
             return;
         }
     }
+
     for (int vc = 0; vc < m_num_vcs; vc++) {
         if (m_ni_buffers[vc]->isReady(curCycle() + Cycles(1))) {
             scheduleEvent(Cycles(1));
index 05142cd28a332cc49deb202c509e376371d34111..2494d05d1c5c6c93ad9eadb523e4dc8067aba33f 100644 (file)
@@ -60,8 +60,9 @@ class NetworkInterface_d : public ClockedObject, public Consumer
     void addOutPort(NetworkLink_d *out_link, CreditLink_d *credit_link);
 
     void wakeup();
-    void addNode(std::vector<MessageBuffer *> &inNode,
-                 std::vector<MessageBuffer *> &outNode);
+    void addNode(std::map<int, MessageBuffer *> &inNode,
+                 std::map<int, MessageBuffer *> &outNode);
+
     void print(std::ostream& out) const;
     int get_vnet(int vc);
     void init_net_ptr(GarnetNetwork_d *net_ptr) { m_net_ptr = net_ptr; }
@@ -89,9 +90,9 @@ class NetworkInterface_d : public ClockedObject, public Consumer
     std::vector<Cycles> m_ni_enqueue_time;
 
     // The Message buffers that takes messages from the protocol
-    std::vector<MessageBuffer *> inNode_ptr;
+    std::map<int, MessageBuffer *> inNode_ptr;
     // The Message buffers that provides messages to the protocol
-    std::vector<MessageBuffer *> outNode_ptr;
+    std::map<int, MessageBuffer *> outNode_ptr;
 
     bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
     int calculateVC(int vnet);
index 13bbe2b08181aa2032b5d50b7ff73411862872d7..26d2423e86b3d33ae8aa4cfd8e97af65cb46c6e6 100644 (file)
@@ -49,13 +49,10 @@ NetworkInterface::NetworkInterface(const Params *p)
     m_virtual_networks  = p->virt_nets;
     m_vc_per_vnet = p->vcs_per_vnet;
     m_num_vcs = m_vc_per_vnet*m_virtual_networks;
-
     m_vc_round_robin = 0;
-    m_ni_buffers.resize(m_num_vcs);
-    inNode_ptr.resize(m_virtual_networks);
-    outNode_ptr.resize(m_virtual_networks);
 
     // instantiating the NI flit buffers
+    m_ni_buffers.resize(m_num_vcs);
     for (int i =0; i < m_num_vcs; i++)
         m_ni_buffers[i] = new flitBuffer();
 
@@ -93,18 +90,20 @@ NetworkInterface::addOutPort(NetworkLink *out_link)
 }
 
 void
-NetworkInterface::addNode(vector<MessageBuffer*>& in,
-    vector<MessageBuffer*>& out)
+NetworkInterface::addNode(map<int, MessageBuffer*>& in,
+    map<int, MessageBuffer*>& out)
 {
-    assert(in.size() == m_virtual_networks);
     inNode_ptr = in;
     outNode_ptr = out;
 
-    // protocol injects messages into the NI
-    for (int j = 0; j < m_virtual_networks; j++) {
-        inNode_ptr[j]->setConsumer(this);
-        inNode_ptr[j]->setReceiver(this);
-        outNode_ptr[j]->setSender(this);
+    for (auto& it: in) {
+        // the protocol injects messages into the NI
+        it.second->setConsumer(this);
+        it.second->setReceiver(this);
+    }
+
+    for (auto& it : out) {
+        it.second->setSender(this);
     }
 }
 
@@ -243,12 +242,14 @@ NetworkInterface::wakeup()
 
     //Checking for messages coming from the protocol
     // can pick up a message/cycle for each virtual net
-    for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
-        while (inNode_ptr[vnet]->isReady()) // Is there a message waiting
-        {
-            msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
+    for (auto it = inNode_ptr.begin(); it != inNode_ptr.end(); ++it) {
+        int vnet = (*it).first;
+        MessageBuffer *b = (*it).second;
+
+        while (b->isReady()) { // Is there a message waiting
+            msg_ptr = b->peekMsgPtr();
             if (flitisizeMessage(msg_ptr, vnet)) {
-                inNode_ptr[vnet]->dequeue();
+                b->dequeue();
             } else {
                 break;
             }
@@ -324,14 +325,17 @@ NetworkInterface::scheduleOutputLink()
 void
 NetworkInterface::checkReschedule()
 {
-    for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
-        if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
+    for (const auto& it : inNode_ptr) {
+        MessageBuffer *b = it.second;
+
+        while (b->isReady()) { // Is there a message waiting
             scheduleEvent(Cycles(1));
             return;
         }
     }
+
     for (int vc = 0; vc < m_num_vcs; vc++) {
-        if (m_ni_buffers[vc]->isReadyForNext(curCycle())) {
+        if (m_ni_buffers[vc]->isReady(curCycle() + Cycles(1))) {
             scheduleEvent(Cycles(1));
             return;
         }
index 0af538bf22fb04798a25efc211f4302e856435f0..aa30bd758da6b26299c12a087d741f51a7ac3246 100644 (file)
@@ -56,10 +56,10 @@ class NetworkInterface : public ClockedObject, public FlexibleConsumer
 
     void addInPort(NetworkLink *in_link);
     void addOutPort(NetworkLink *out_link);
+    void addNode(std::map<int, MessageBuffer *> &inNode,
+                 std::map<int, MessageBuffer *> &outNode);
 
     void wakeup();
-    void addNode(std::vector<MessageBuffer *> &inNode,
-                 std::vector<MessageBuffer *> &outNode);
     void grant_vc(int out_port, int vc, Cycles grant_time);
     void release_vc(int out_port, int vc, Cycles release_time);
 
@@ -93,10 +93,10 @@ class NetworkInterface : public ClockedObject, public FlexibleConsumer
     std::vector<flitBuffer *>   m_ni_buffers;
 
     // The Message buffers that takes messages from the protocol
-    std::vector<MessageBuffer *> inNode_ptr;
+    std::map<int, MessageBuffer *> inNode_ptr;
 
     // The Message buffers that provides messages to the protocol
-    std::vector<MessageBuffer *> outNode_ptr;
+    std::map<int, MessageBuffer *> outNode_ptr;
 
     bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
     int calculateVC(int vnet);
index 851ababc413f926653ef8098c2ecab741696db38..0fc2c6be3ee26f80a074ab5790cd5bf9c827225b 100644 (file)
@@ -387,7 +387,7 @@ Router::checkReschedule()
 {
     for (int port = 0; port < m_out_link.size(); port++) {
         for (int vc = 0; vc < m_num_vcs; vc++) {
-            if (m_router_buffers[port][vc]->isReadyForNext(curCycle())) {
+            if (m_router_buffers[port][vc]->isReady(curCycle() + Cycles(1))) {
                 scheduleEvent(Cycles(1));
                 return;
             }
index 7a8ea8c23ca21bc1a365bb727ad037e9fc545ab3..ee31ac3d25815aaa89917e37e26e96e71a7c6b60 100644 (file)
@@ -61,17 +61,6 @@ flitBuffer::isReady(Cycles curTime)
     return false;
 }
 
-bool
-flitBuffer::isReadyForNext(Cycles curTime)
-{
-    if (m_buffer.size() != 0 ) {
-        flit *t_flit = m_buffer.front();
-        if (t_flit->get_time() <= (curTime + 1))
-            return true;
-    }
-    return false;
-}
-
 bool
 flitBuffer::isFull()
 {
index 609c5a9b66c24f54cda71aafc0a03732463388d4..99fa2678b416f4da2d3d8c2ec2922d63453ae519 100644 (file)
@@ -44,7 +44,6 @@ class flitBuffer
     flitBuffer(int maximum_size);
 
     bool isReady(Cycles curTime);
-    bool isReadyForNext(Cycles curTime);
     bool isFull();
     bool isEmpty();
     void setMaxSize(int maximum);
index 0c6111c484ac2f4050f4f4c261b1530a70448685..4565711a2a8d39acfdeddea89eb56010efd4e8a3 100644 (file)
@@ -61,36 +61,33 @@ PerfectSwitch::init(SimpleNetwork *network_ptr)
 {
     m_network_ptr = network_ptr;
 
-    for(int i = 0;i < m_virtual_networks;++i)
-    {
+    for(int i = 0;i < m_virtual_networks;++i) {
         m_pending_message_count.push_back(0);
     }
 }
 
 void
-PerfectSwitch::addInPort(const vector<MessageBuffer*>& in)
+PerfectSwitch::addInPort(const map<int, MessageBuffer*>& in)
 {
-    assert(in.size() == m_virtual_networks);
     NodeID port = m_in.size();
     m_in.push_back(in);
 
-    for (int j = 0; j < m_virtual_networks; j++) {
-        m_in[port][j]->setConsumer(this);
+    for (auto& it : in) {
+        it.second->setConsumer(this);
 
         string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
-            to_string(m_switch_id), to_string(port), to_string(j));
-        m_in[port][j]->setDescription(desc);
-        m_in[port][j]->setIncomingLink(port);
-        m_in[port][j]->setVnet(j);
+            to_string(m_switch_id), to_string(port), to_string(it.first));
+
+        it.second->setDescription(desc);
+        it.second->setIncomingLink(port);
+        it.second->setVnet(it.first);
     }
 }
 
 void
-PerfectSwitch::addOutPort(const vector<MessageBuffer*>& out,
+PerfectSwitch::addOutPort(const map<int, MessageBuffer*>& out,
     const NetDest& routing_table_entry)
 {
-    assert(out.size() == m_virtual_networks);
-
     // Setup link order
     LinkOrder l;
     l.m_value = 0;
@@ -152,11 +149,16 @@ PerfectSwitch::wakeup()
                 vector<NetDest> output_link_destinations;
 
                 // Is there a message waiting?
-                while (m_in[incoming][vnet]->isReady()) {
+                auto it = m_in[incoming].find(vnet);
+                if (it == m_in[incoming].end())
+                    continue;
+                MessageBuffer *buffer = (*it).second;
+
+                while (buffer->isReady()) {
                     DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
 
                     // Peek at message
-                    msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
+                    msg_ptr = buffer->peekMsgPtr();
                     net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
                     DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
 
@@ -261,7 +263,7 @@ PerfectSwitch::wakeup()
                     }
 
                     // Dequeue msg
-                    m_in[incoming][vnet]->dequeue();
+                    buffer->dequeue();
                     m_pending_message_count[vnet]--;
 
                     // Enqueue it - for all outgoing queues
index c01c50a3bff5786cc5e95c8b00732fa7ab4c0087..25e3e2754f1a4a4409a89673ed42b038036eb227 100644 (file)
@@ -65,9 +65,10 @@ class PerfectSwitch : public Consumer
     { return csprintf("PerfectSwitch-%i", m_switch_id); }
 
     void init(SimpleNetwork *);
-    void addInPort(const std::vector<MessageBuffer*>& in);
-    void addOutPort(const std::vector<MessageBuffer*>& out,
+    void addInPort(const std::map<int, MessageBuffer*>& in);
+    void addOutPort(const std::map<int, MessageBuffer*>& out,
                     const NetDest& routing_table_entry);
+
     int getInLinks() const { return m_in.size(); }
     int getOutLinks() const { return m_out.size(); }
 
@@ -86,8 +87,9 @@ class PerfectSwitch : public Consumer
     SwitchID m_switch_id;
 
     // vector of queues from the components
-    std::vector<std::vector<MessageBuffer*> > m_in;
-    std::vector<std::vector<MessageBuffer*> > m_out;
+    std::vector<std::map<int, MessageBuffer*> > m_in;
+    std::vector<std::map<int, MessageBuffer*> > m_out;
+
     std::vector<NetDest> m_routing_table;
     std::vector<LinkOrder> m_link_order;
 
index 2d08f9fa42827bbf4ba684396de00d6a49cafe19..f51a0c891ac631e01c7dc12ff55817b93e12e381 100644 (file)
@@ -93,10 +93,9 @@ SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
 
     SimpleExtLink *simple_link = safe_cast<SimpleExtLink*>(link);
 
-    m_switches[src]->addOutPort(m_fromNetQueues[dest],
-                                         routing_table_entry,
-                                         simple_link->m_latency,
-                                         simple_link->m_bw_multiplier);
+    m_switches[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry,
+                                simple_link->m_latency,
+                                simple_link->m_bw_multiplier);
 
     m_endpoint_switches[dest] = m_switches[src];
 }
@@ -118,25 +117,28 @@ SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest, BasicLink* link,
                                 const NetDest& routing_table_entry)
 {
     // Create a set of new MessageBuffers
-    std::vector<MessageBuffer*> queues;
+    std::map<int, MessageBuffer*> queues;
     for (int i = 0; i < m_virtual_networks; i++) {
         // allocate a buffer
         MessageBuffer* buffer_ptr = new MessageBuffer;
         buffer_ptr->setOrdering(true);
+
         if (m_buffer_size > 0) {
             buffer_ptr->resize(m_buffer_size);
         }
-        queues.push_back(buffer_ptr);
+
+        queues[i] = buffer_ptr;
         // remember to deallocate it
         m_buffers_to_free.push_back(buffer_ptr);
     }
+
     // Connect it to the two switches
     SimpleIntLink *simple_link = safe_cast<SimpleIntLink*>(link);
 
     m_switches[dest]->addInPort(queues);
     m_switches[src]->addOutPort(queues, routing_table_entry,
-                                         simple_link->m_latency, 
-                                         simple_link->m_bw_multiplier);
+                                simple_link->m_latency,
+                                simple_link->m_bw_multiplier);
 }
 
 void
@@ -151,20 +153,20 @@ SimpleNetwork::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
     m_in_use[network_num] = true;
 }
 
-MessageBuffer*
-SimpleNetwork::getToNetQueue(NodeID id, bool ordered, int network_num,
-                             std::string vnet_type)
+void
+SimpleNetwork::setToNetQueue(NodeID id, bool ordered, int network_num,
+                             std::string vnet_type, MessageBuffer *b)
 {
     checkNetworkAllocation(id, ordered, network_num);
-    return m_toNetQueues[id][network_num];
+    m_toNetQueues[id][network_num] = b;
 }
 
-MessageBuffer*
-SimpleNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,
-                               std::string vnet_type)
+void
+SimpleNetwork::setFromNetQueue(NodeID id, bool ordered, int network_num,
+                               std::string vnet_type, MessageBuffer *b)
 {
     checkNetworkAllocation(id, ordered, network_num);
-    return m_fromNetQueues[id][network_num];
+    m_fromNetQueues[id][network_num] = b;
 }
 
 void
index 90560c2674af84ff2a67d7a6b581d35538b7ddf8..a2723c7159e95b18b6901809327155382b8200dd 100644 (file)
@@ -56,9 +56,11 @@ class SimpleNetwork : public Network
     void collateStats();
     void regStats();
 
-    // returns the queue requested for the given component
-    MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num, std::string vnet_type);
-    MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num, std::string vnet_type);
+    // sets the queue requested
+    void setToNetQueue(NodeID id, bool ordered, int network_num,
+                       std::string vnet_type, MessageBuffer *b);
+    void setFromNetQueue(NodeID id, bool ordered, int network_num,
+                         std::string vnet_type, MessageBuffer *b);
 
     bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
     bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
@@ -89,6 +91,7 @@ class SimpleNetwork : public Network
     // Private copy constructor and assignment operator
     SimpleNetwork(const SimpleNetwork& obj);
     SimpleNetwork& operator=(const SimpleNetwork& obj);
+
     std::vector<Switch*> m_switches;
     std::vector<MessageBuffer*> m_buffers_to_free;
     std::vector<Switch*> m_endpoint_switches;
index 6e116d82cdd8bfc5c3e746dff637e0a648386724..e028de02ae4eb6e8c294569d6c8600ab96c4ba47 100644 (file)
@@ -64,29 +64,33 @@ Switch::init()
 }
 
 void
-Switch::addInPort(const vector<MessageBuffer*>& in)
+Switch::addInPort(const map<int, MessageBuffer*>& in)
 {
     m_perfect_switch->addInPort(in);
 
-    for (int i = 0; i < in.size(); i++) {
-        in[i]->setReceiver(this);
+    for (auto& it : in) {
+        it.second->setReceiver(this);
     }
 }
 
 void
-Switch::addOutPort(const vector<MessageBuffer*>& out,
-    const NetDest& routing_table_entry, Cycles link_latency, int bw_multiplier)
+Switch::addOutPort(const map<int, MessageBuffer*>& out,
+                   const NetDest& routing_table_entry,
+                   Cycles link_latency, int bw_multiplier)
 {
     // Create a throttle
     Throttle* throttle_ptr = new Throttle(m_id, m_throttles.size(),
-            link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
-            this);
+                                          link_latency, bw_multiplier,
+                                          m_network_ptr->getEndpointBandwidth(),
+                                          this);
+
     m_throttles.push_back(throttle_ptr);
 
     // Create one buffer per vnet (these are intermediaryQueues)
-    vector<MessageBuffer*> intermediateBuffers;
-    for (int i = 0; i < out.size(); i++) {
-        out[i]->setSender(this);
+    map<int, MessageBuffer*> intermediateBuffers;
+
+    for (auto& it : out) {
+        it.second->setSender(this);
 
         MessageBuffer* buffer_ptr = new MessageBuffer;
         // Make these queues ordered
@@ -95,7 +99,7 @@ Switch::addOutPort(const vector<MessageBuffer*>& out,
             buffer_ptr->resize(m_network_ptr->getBufferSize());
         }
 
-        intermediateBuffers.push_back(buffer_ptr);
+        intermediateBuffers[it.first] = buffer_ptr;
         m_buffers_to_free.push_back(buffer_ptr);
 
         buffer_ptr->setSender(this);
index 58193d42d627cdd73ef61c3579d9007414b5d31b..d4e5c5eba11f79ec5564c3a15feeac5702ba80de 100644 (file)
@@ -60,12 +60,13 @@ class Switch : public BasicRouter
     typedef SwitchParams Params;
     Switch(const Params *p);
     ~Switch();
-
     void init();
-    void addInPort(const std::vector<MessageBuffer*>& in);
-    void addOutPort(const std::vector<MessageBuffer*>& out,
-        const NetDest& routing_table_entry, Cycles link_latency,
-        int bw_multiplier);
+
+    void addInPort(const std::map<int, MessageBuffer*>& in);
+    void addOutPort(const std::map<int, MessageBuffer*>& out,
+                    const NetDest& routing_table_entry,
+                    Cycles link_latency, int bw_multiplier);
+
     const Throttle* getThrottle(LinkID link_number) const;
 
     void resetStats();
index 40958a6daf00ac00fbaee5c067a29eded877e5d6..91bad217bbfb32662f8bd70e5330bfe8a987145d 100644 (file)
@@ -69,42 +69,92 @@ Throttle::init(NodeID node, Cycles link_latency,
                int link_bandwidth_multiplier, int endpoint_bandwidth)
 {
     m_node = node;
-    m_vnets = 0;
-
     assert(link_bandwidth_multiplier > 0);
     m_link_bandwidth_multiplier = link_bandwidth_multiplier;
+
     m_link_latency = link_latency;
     m_endpoint_bandwidth = endpoint_bandwidth;
 
     m_wakeups_wo_switch = 0;
-
     m_link_utilization_proxy = 0;
 }
 
 void
-Throttle::addLinks(const std::vector<MessageBuffer*>& in_vec,
-    const std::vector<MessageBuffer*>& out_vec)
+Throttle::addLinks(const map<int, MessageBuffer*>& in_vec,
+                   const map<int, MessageBuffer*>& out_vec)
 {
     assert(in_vec.size() == out_vec.size());
-    for (int i=0; i<in_vec.size(); i++) {
-        addVirtualNetwork(in_vec[i], out_vec[i]);
+
+    for (auto& it : in_vec) {
+        int vnet = it.first;
+
+        auto jt = out_vec.find(vnet);
+        assert(jt != out_vec.end());
+
+        MessageBuffer *in_ptr = it.second;
+        MessageBuffer *out_ptr = (*jt).second;
+
+        m_in[vnet] = in_ptr;
+        m_out[vnet] = out_ptr;
+        m_units_remaining[vnet] = 0;
+
+        // Set consumer and description
+        in_ptr->setConsumer(this);
+        string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
+            to_string(m_node) + "]";
+        in_ptr->setDescription(desc);
     }
 }
 
 void
-Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
+Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
+                      MessageBuffer *in, MessageBuffer *out)
 {
-    m_units_remaining.push_back(0);
-    m_in.push_back(in_ptr);
-    m_out.push_back(out_ptr);
+    assert(out != NULL);
+    assert(in != NULL);
+    assert(m_units_remaining[vnet] >= 0);
+
+    while (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
+                                out->areNSlotsAvailable(1)) {
+
+        // See if we are done transferring the previous message on
+        // this virtual network
+        if (m_units_remaining[vnet] == 0 && in->isReady()) {
+            // Find the size of the message we are moving
+            MsgPtr msg_ptr = in->peekMsgPtr();
+            NetworkMessage* net_msg_ptr =
+                safe_cast<NetworkMessage*>(msg_ptr.get());
+            m_units_remaining[vnet] +=
+                network_message_to_size(net_msg_ptr);
+
+            DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
+                    "enqueueing net msg %d time: %lld.\n",
+                    m_node, getLinkBandwidth(), m_units_remaining[vnet],
+                    g_system_ptr->curCycle());
+
+            // Move the message
+            in->dequeue();
+            out->enqueue(msg_ptr, m_link_latency);
+
+            // Count the message
+            m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
+            DPRINTF(RubyNetwork, "%s\n", *out);
+        }
+
+        // Calculate the amount of bandwidth we spent on this message
+        int diff = m_units_remaining[vnet] - bw_remaining;
+        m_units_remaining[vnet] = max(0, diff);
+        bw_remaining = max(0, -diff);
+    }
 
-    // Set consumer and description
-    m_in[m_vnets]->setConsumer(this);
+    if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
+                             !out->areNSlotsAvailable(1)) {
+        DPRINTF(RubyNetwork, "vnet: %d", vnet);
 
-    string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
-        to_string(m_node) + "]";
-    m_in[m_vnets]->setDescription(desc);
-    m_vnets++;
+        // schedule me to wakeup again because I'm waiting for my
+        // output queue to become available
+        schedule_wakeup = true;
+    }
 }
 
 void
@@ -114,71 +164,30 @@ Throttle::wakeup()
     assert(getLinkBandwidth() > 0);
     int bw_remaining = getLinkBandwidth();
 
-    // Give the highest numbered link priority most of the time
     m_wakeups_wo_switch++;
-    int highest_prio_vnet = m_vnets-1;
-    int lowest_prio_vnet = 0;
-    int counter = 1;
     bool schedule_wakeup = false;
 
+    // variable for deciding the direction in which to iterate
+    bool iteration_direction = false;
+
+
     // invert priorities to avoid starvation seen in the component network
     if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
         m_wakeups_wo_switch = 0;
-        highest_prio_vnet = 0;
-        lowest_prio_vnet = m_vnets-1;
-        counter = -1;
+        iteration_direction = true;
     }
 
-    for (int vnet = highest_prio_vnet;
-         (vnet * counter) >= (counter * lowest_prio_vnet);
-         vnet -= counter) {
-
-        assert(m_out[vnet] != NULL);
-        assert(m_in[vnet] != NULL);
-        assert(m_units_remaining[vnet] >= 0);
-
-        while (bw_remaining > 0 &&
-            (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
-            m_out[vnet]->areNSlotsAvailable(1)) {
-
-            // See if we are done transferring the previous message on
-            // this virtual network
-            if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
-                // Find the size of the message we are moving
-                MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
-                NetworkMessage* net_msg_ptr =
-                    safe_cast<NetworkMessage*>(msg_ptr.get());
-                m_units_remaining[vnet] +=
-                    network_message_to_size(net_msg_ptr);
-
-                DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
-                        "enqueueing net msg %d time: %lld.\n",
-                        m_node, getLinkBandwidth(), m_units_remaining[vnet],
-                        g_system_ptr->curCycle());
-
-                // Move the message
-                m_in[vnet]->dequeue();
-                m_out[vnet]->enqueue(msg_ptr, m_link_latency);
-
-                // Count the message
-                m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
-
-                DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]);
-            }
-
-            // Calculate the amount of bandwidth we spent on this message
-            int diff = m_units_remaining[vnet] - bw_remaining;
-            m_units_remaining[vnet] = max(0, diff);
-            bw_remaining = max(0, -diff);
+    if (iteration_direction) {
+        for (auto& it : m_in) {
+            int vnet = it.first;
+            operateVnet(vnet, bw_remaining, schedule_wakeup,
+                        it.second, m_out[vnet]);
         }
-
-        if (bw_remaining > 0 &&
-            (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
-            !m_out[vnet]->areNSlotsAvailable(1)) {
-            DPRINTF(RubyNetwork, "vnet: %d", vnet);
-            // schedule me to wakeup again because I'm waiting for my
-            // output queue to become available
-            schedule_wakeup = true;
+    } else {
+        for (auto it = m_in.rbegin(); it != m_in.rend(); ++it) {
+            int vnet = (*it).first;
+            operateVnet(vnet, bw_remaining, schedule_wakeup,
+                        (*it).second, m_out[vnet]);
         }
     }
 
@@ -215,7 +224,7 @@ Throttle::regStats(string parent)
     for (MessageSizeType type = MessageSizeType_FIRST;
          type < MessageSizeType_NUM; ++type) {
         m_msg_counts[(unsigned int)type]
-            .init(m_vnets)
+            .init(Network::getNumberOfVirtualNetworks())
             .name(parent + csprintf(".throttle%i", m_node) + ".msg_count." +
                     MessageSizeType_to_string(type))
             .flags(Stats::nozero)
index cdc627bb79eccc538d03c9b6d8d4cf59d83d5770..d978f14fd782fa5c6c9017a717698865e6b02a0a 100644 (file)
@@ -62,8 +62,8 @@ class Throttle : public Consumer
     std::string name()
     { return csprintf("Throttle-%i", m_sID); }
 
-    void addLinks(const std::vector<MessageBuffer*>& in_vec,
-                  const std::vector<MessageBuffer*>& out_vec);
+    void addLinks(const std::map<int, MessageBuffer*>& in_vec,
+                  const std::map<int, MessageBuffer*>& out_vec);
     void wakeup();
 
     // The average utilization (a fraction) since last clearStats()
@@ -85,16 +85,17 @@ class Throttle : public Consumer
   private:
     void init(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
               int endpoint_bandwidth);
-    void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr);
+    void operateVnet(int vnet, int &bw_remainin, bool &schedule_wakeup,
+                     MessageBuffer *in, MessageBuffer *out);
 
     // Private copy constructor and assignment operator
     Throttle(const Throttle& obj);
     Throttle& operator=(const Throttle& obj);
 
-    std::vector<MessageBuffer*> m_in;
-    std::vector<MessageBuffer*> m_out;
-    unsigned int m_vnets;
-    std::vector<int> m_units_remaining;
+    std::map<int, MessageBuffer*> m_in;
+    std::map<int, MessageBuffer*> m_out;
+    std::map<int, int> m_units_remaining;
+
     int m_sID;
     NodeID m_node;
     int m_link_bandwidth_multiplier;
index 0f5a70a6e8b7704424d58646e0a70502e87f4da1..366ea04ce56bbbc1a43dcef395ae26d30b17fdca 100644 (file)
@@ -88,13 +88,6 @@ AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
     m_delayVCHistogram[virtualNetwork]->sample(delay);
 }
 
-void
-AbstractController::connectWithPeer(AbstractController *c)
-{
-    getQueuesFromPeer(c);
-    c->getQueuesFromPeer(this);
-}
-
 void
 AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
 {
index 36b4665c38c7f93f390a91b9eb8b3f0e791052a7..42d158653cb4f08c5b6a166a3a8f32fd00cdefd1 100644 (file)
@@ -96,6 +96,9 @@ class AbstractController : public ClockedObject, public Consumer
     virtual void collateStats()
     {fatal("collateStats() should be overridden!");}
 
+    //! Set the message buffer with given name.
+    virtual void setNetQueue(const std::string& name, MessageBuffer *b) = 0;
+
   public:
     MachineID getMachineID() const { return m_machineID; }
 
@@ -103,25 +106,12 @@ class AbstractController : public ClockedObject, public Consumer
     Stats::Histogram& getDelayVCHist(uint32_t index)
     { return *(m_delayVCHistogram[index]); }
 
-    MessageBuffer *getPeerQueue(uint32_t pid)
-    {
-        std::map<uint32_t, MessageBuffer *>::iterator it =
-                                        peerQueueMap.find(pid);
-        assert(it != peerQueueMap.end());
-        return (*it).second;
-    }
-
   protected:
     //! Profiles original cache requests including PUTs
     void profileRequest(const std::string &request);
     //! Profiles the delay associated with messages.
     void profileMsgDelay(uint32_t virtualNetwork, Cycles delay);
 
-    //! Function for connecting peer controllers
-    void connectWithPeer(AbstractController *);
-    virtual void getQueuesFromPeer(AbstractController *)
-    { fatal("getQueuesFromPeer() should be called only if implemented!"); }
-
     void stallBuffer(MessageBuffer* buf, Address addr);
     void wakeUpBuffers(Address addr);
     void wakeUpAllBuffers(Address addr);
@@ -147,9 +137,6 @@ class AbstractController : public ClockedObject, public Consumer
     unsigned int m_buffer_size;
     Cycles m_recycle_latency;
 
-    //! Map from physical network number to the Message Buffer.
-    std::map<uint32_t, MessageBuffer*> peerQueueMap;
-
     //! Counter for the number of cycles when the transitions carried out
     //! were equal to the maximum allowed
     Stats::Scalar m_fully_busy_cycles;
index 71fcc053fe9edd597e5d3d7566de46a6b581ac86..7360136128f9ff5bac3c708306bdb8a3a5a5bb63 100644 (file)
@@ -51,7 +51,12 @@ class StateMachine(Symbol):
     def __init__(self, symtab, ident, location, pairs, config_parameters):
         super(StateMachine, self).__init__(symtab, ident, location, pairs)
         self.table = None
+
+        # Data members in the State Machine that have been declared before
+        # the opening brace '{' of the machine.  Note that these along with
+        # the members in self.objects form the entire set of data members.
         self.config_parameters = config_parameters
+
         self.prefetchers = []
 
         for param in config_parameters:
@@ -74,6 +79,10 @@ class StateMachine(Symbol):
         self.transitions = []
         self.in_ports = []
         self.functions = []
+
+        # Data members in the State Machine that have been declared inside
+        # the {} machine.  Note that these along with the config params
+        # form the entire set of data members of the machine.
         self.objects = []
         self.TBEType   = None
         self.EntryType = None
@@ -200,7 +209,13 @@ class $py_ident(RubyController):
             if param.rvalue is not None:
                 dflt_str = str(param.rvalue.inline()) + ', '
 
-            if python_class_map.has_key(param.type_ast.type.c_ident):
+            if param.type_ast.type.c_ident == "MessageBuffer":
+                if param["network"] == "To":
+                    code('${{param.ident}} = MasterPort(${dflt_str}"")')
+                else:
+                    code('${{param.ident}} = SlavePort(${dflt_str}"")')
+
+            elif python_class_map.has_key(param.type_ast.type.c_ident):
                 python_type = python_class_map[param.type_ast.type.c_ident]
                 code('${{param.ident}} = Param.${{python_type}}(${dflt_str}"")')
 
@@ -241,13 +256,10 @@ class $py_ident(RubyController):
 ''')
 
         seen_types = set()
-        has_peer = False
         for var in self.objects:
             if var.type.ident not in seen_types and not var.type.isPrimitive:
                 code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
-            if "network" in var and "physical_network" in var:
-                has_peer = True
-            seen_types.add(var.type.ident)
+                seen_types.add(var.type.ident)
 
         # for adding information to the protocol debug trace
         code('''
@@ -260,7 +272,9 @@ class $c_ident : public AbstractController
     $c_ident(const Params *p);
     static int getNumControllers();
     void init();
+
     MessageBuffer* getMandatoryQueue() const;
+    void setNetQueue(const std::string& name, MessageBuffer *b);
 
     void print(std::ostream& out) const;
     void wakeup();
@@ -340,8 +354,6 @@ static int m_num_controllers;
             if proto:
                 code('$proto')
 
-        if has_peer:
-            code('void getQueuesFromPeer(AbstractController *);')
         if self.EntryType != None:
             code('''
 
@@ -404,7 +416,6 @@ void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr);
         code = self.symtab.codeFormatter()
         ident = self.ident
         c_ident = "%s_Controller" % self.ident
-        has_peer = False
 
         code('''
 /** \\file $c_ident.cc
@@ -486,10 +497,17 @@ $c_ident::$c_ident(const Params *p)
         # include a sequencer, connect the it to the controller.
         #
         for param in self.config_parameters:
+
+            # Do not initialize message buffers since they are initialized
+            # when the port based connections are made.
+            if param.type_ast.type.c_ident == "MessageBuffer":
+                continue
+
             if param.pointer:
                 code('m_${{param.ident}}_ptr = p->${{param.ident}};')
             else:
                 code('m_${{param.ident}} = p->${{param.ident}};')
+
             if re.compile("sequencer").search(param.ident):
                 code('m_${{param.ident}}_ptr->setController(this);')
             
@@ -498,20 +516,9 @@ $c_ident::$c_ident(const Params *p)
                 code('''
 m_${{var.ident}}_ptr = new ${{var.type.c_ident}}();
 m_${{var.ident}}_ptr->setReceiver(this);
-''')
-            else:
-                if "network" in var and "physical_network" in var and \
-                   var["network"] == "To":
-                    has_peer = True
-                    code('''
-m_${{var.ident}}_ptr = new ${{var.type.c_ident}}();
-peerQueueMap[${{var["physical_network"]}}] = m_${{var.ident}}_ptr;
-m_${{var.ident}}_ptr->setSender(this);
 ''')
 
         code('''
-if (p->peer != NULL)
-    connectWithPeer(p->peer);
 
 for (int state = 0; state < ${ident}_State_NUM; state++) {
     for (int event = 0; event < ${ident}_Event_NUM; event++) {
@@ -528,16 +535,92 @@ for (int event = 0; event < ${ident}_Event_NUM; event++) {
 }
 
 void
-$c_ident::init()
+$c_ident::setNetQueue(const std::string& name, MessageBuffer *b)
 {
-    MachineType machine_type = string_to_MachineType("${{var.machine.ident}}");
+    MachineType machine_type = string_to_MachineType("${{self.ident}}");
     int base M5_VAR_USED = MachineType_base_number(machine_type);
 
+''')
+        code.indent()
+
+        # set for maintaining the vnet, direction pairs already seen for this
+        # machine.  This map helps in implementing the check for avoiding
+        # multiple message buffers being mapped to the same vnet.
+        vnet_dir_set = set()
+
+        for var in self.config_parameters:
+            if "network" in var:
+                vtype = var.type_ast.type
+                vid = "m_%s_ptr" % var.ident
+
+                code('''
+if ("${{var.ident}}" == name) {
+    $vid = b;
+    assert($vid != NULL);
+''')
+                code.indent()
+                # Network port object
+                network = var["network"]
+                ordered =  var["ordered"]
+
+                if "virtual_network" in var:
+                    vnet = var["virtual_network"]
+                    vnet_type = var["vnet_type"]
+
+                    assert (vnet, network) not in vnet_dir_set
+                    vnet_dir_set.add((vnet,network))
+
+                    code('''
+m_net_ptr->set${network}NetQueue(m_version + base, $ordered, $vnet,
+                                 "$vnet_type", b);
+''')
+                # Set the end
+                if network == "To":
+                    code('$vid->setSender(this);')
+                else:
+                    code('$vid->setReceiver(this);')
+
+                # Set ordering
+                code('$vid->setOrdering(${{var["ordered"]}});')
+
+                # Set randomization
+                if "random" in var:
+                    # A buffer
+                    code('$vid->setRandomization(${{var["random"]}});')
+
+                # Set Priority
+                if "rank" in var:
+                    code('$vid->setPriority(${{var["rank"]}})')
+
+                # Set buffer size
+                code('$vid->resize(m_buffer_size);')
+
+                if "recycle_latency" in var:
+                    code('$vid->setRecycleLatency( ' \
+                         'Cycles(${{var["recycle_latency"]}}));')
+                else:
+                    code('$vid->setRecycleLatency(m_recycle_latency);')
+
+                # set description (may be overridden later by port def)
+                code('''
+$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.ident}}]");
+''')
+                code.dedent()
+                code('}\n')
+
+        code.dedent()
+        code('''
+}
+
+void
+$c_ident::init()
+{
     // initialize objects
 
 ''')
 
         code.indent()
+
         for var in self.objects:
             vtype = var.type
             vid = "m_%s_ptr" % var.ident
@@ -589,55 +672,6 @@ $c_ident::init()
                         code('$vid->setSender(this);')
                         code('$vid->setReceiver(this);')
 
-            else:
-                # Network port object
-                network = var["network"]
-                ordered =  var["ordered"]
-
-                if "virtual_network" in var:
-                    vnet = var["virtual_network"]
-                    vnet_type = var["vnet_type"]
-
-                    assert var.machine is not None
-                    code('''
-$vid = m_net_ptr->get${network}NetQueue(m_version + base, $ordered, $vnet, "$vnet_type");
-assert($vid != NULL);
-''')
-
-                    # Set the end
-                    if network == "To":
-                        code('$vid->setSender(this);')
-                    else:
-                        code('$vid->setReceiver(this);')
-
-                # Set ordering
-                if "ordered" in var:
-                    # A buffer
-                    code('$vid->setOrdering(${{var["ordered"]}});')
-
-                # Set randomization
-                if "random" in var:
-                    # A buffer
-                    code('$vid->setRandomization(${{var["random"]}});')
-
-                # Set Priority
-                if "rank" in var:
-                    code('$vid->setPriority(${{var["rank"]}})')
-
-                # Set buffer size
-                if vtype.isBuffer:
-                    code('''
-if (m_buffer_size > 0) {
-    $vid->resize(m_buffer_size);
-}
-''')
-
-                # set description (may be overriden later by port def)
-                code('''
-$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.ident}}]");
-
-''')
-
             if vtype.isBuffer:
                 if "recycle_latency" in var:
                     code('$vid->setRecycleLatency( ' \
@@ -965,6 +999,13 @@ $c_ident::functionalReadBuffers(PacketPtr& pkt)
             if vtype.isBuffer:
                 vid = "m_%s_ptr" % var.ident
                 code('if ($vid->functionalRead(pkt)) { return true; }')
+
+        for var in self.config_parameters:
+            vtype = var.type_ast.type
+            if vtype.isBuffer:
+                vid = "m_%s_ptr" % var.ident
+                code('if ($vid->functionalRead(pkt)) { return true; }')
+
         code('''
                 return false;
 }
@@ -982,31 +1023,18 @@ $c_ident::functionalWriteBuffers(PacketPtr& pkt)
             if vtype.isBuffer:
                 vid = "m_%s_ptr" % var.ident
                 code('num_functional_writes += $vid->functionalWrite(pkt);')
+
+        for var in self.config_parameters:
+            vtype = var.type_ast.type
+            if vtype.isBuffer:
+                vid = "m_%s_ptr" % var.ident
+                code('num_functional_writes += $vid->functionalWrite(pkt);')
+
         code('''
     return num_functional_writes;
 }
 ''')
 
-        # Check if this controller has a peer, if yes then write the
-        # function for connecting to the peer.
-        if has_peer:
-            code('''
-
-void
-$c_ident::getQueuesFromPeer(AbstractController *peer)
-{
-''')
-            for var in self.objects:
-                if "network" in var and "physical_network" in var and \
-                   var["network"] == "From":
-                    code('''
-m_${{var.ident}}_ptr = peer->getPeerQueue(${{var["physical_network"]}});
-assert(m_${{var.ident}}_ptr != NULL);
-m_${{var.ident}}_ptr->setReceiver(this);
-
-''')
-            code('}')
-
         code.write(path, "%s.cc" % c_ident)
 
     def printCWakeup(self, path, includes):
index fe849ec885deeb78b2048e29f9d32644a690b800..51bd1f62ff6774eb4680e0a2d6421648b814e8c6 100644 (file)
@@ -39,6 +39,7 @@
 #include "dev/etherdevice.hh"
 #include "dev/etherobject.hh"
 #endif
+#include "mem/ruby/slicc_interface/AbstractController.hh"
 #include "mem/mem_object.hh"
 #include "python/swig/pyobject.hh"
 #include "sim/full_system.hh"
@@ -98,6 +99,27 @@ connectPorts(SimObject *o1, const std::string &name1, int i1,
         }
     }
 #endif
+
+    // These could be objects from the ruby memory system.  If yes, then at
+    // least one of them should be an abstract controller.  Do a type check.
+    AbstractController *ac1, *ac2;
+    ac1 = dynamic_cast<AbstractController*>(o1);
+    ac2 = dynamic_cast<AbstractController*>(o2);
+
+    if (ac1 || ac2) {
+        MessageBuffer *b = new MessageBuffer();
+
+        // set the message buffer associated with the provided names
+        if (ac1) {
+            ac1->setNetQueue(name1, b);
+        }
+        if (ac2) {
+            ac2->setNetQueue(name2, b);
+        }
+
+        return 1;
+    }
+
     MemObject *mo1, *mo2;
     mo1 = dynamic_cast<MemObject*>(o1);
     mo2 = dynamic_cast<MemObject*>(o2);