cpu_sequencers.append(cpu_seq)
l0_cntrl_nodes.append(l0_cntrl)
l1_cntrl_nodes.append(l1_cntrl)
- l0_cntrl.peer = l1_cntrl
+
+ # Connect the L0 and L1 controllers
+ l0_cntrl.bufferToL1 = l1_cntrl.bufferFromL0
+ l0_cntrl.bufferFromL1 = l1_cntrl.bufferToL0
+
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestToL2 = ruby_system.network.slave
+ l1_cntrl.responseToL2 = ruby_system.network.slave
+ l1_cntrl.unblockToL2 = ruby_system.network.slave
+
+ l1_cntrl.requestFromL2 = ruby_system.network.master
+ l1_cntrl.responseFromL2 = ruby_system.network.master
+
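This wiring convention repeats for every controller in the patch: a buffer a
SLICC machine declares with network="To" is assigned to the network's slave
port vector, and a network="From" buffer to the master vector. A minimal
sketch of the pattern in this config style, with a hypothetical controller
and buffer names:

    # To-network buffers attach to network.slave; From-network to network.master
    cntrl.requestOut = ruby_system.network.slave
    cntrl.responseIn = ruby_system.network.master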
for j in xrange(num_l2caches_per_cluster):
l2_cache = L2Cache(size = options.l2_size,
i * num_l2caches_per_cluster + j))
l2_cntrl_nodes.append(l2_cntrl)
+ # Connect the L2 controllers and the network
+ l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+ l2_cntrl.unblockToL2Cache = ruby_system.network.master
+ l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+ l2_cntrl.responseToL2Cache = ruby_system.network.master
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromL1Cache = ruby_system.network.slave
+ l1_cntrl.responseFromL1Cache = ruby_system.network.slave
+ l1_cntrl.unblockFromL1Cache = ruby_system.network.slave
+
+ l1_cntrl.requestToL1Cache = ruby_system.network.master
+ l1_cntrl.responseToL1Cache = ruby_system.network.master
+
+
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
+ # Connect the L2 controllers and the network
+ l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+ l2_cntrl.unblockToL2Cache = ruby_system.network.master
+ l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+ l2_cntrl.responseToL2Cache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
+
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# clk_divider value is a fix to pass regression.
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+
+
for i, dma_port in enumerate(dma_ports):
- #
# Create the Ruby objects associated with the dma controller
- #
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
+ # Connect the dma controller to the network
+ dma_cntrl.responseFromDir = ruby_system.network.master
+ dma_cntrl.requestToDir = ruby_system.network.slave
+
+
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromCache = ruby_system.network.slave
+ l1_cntrl.responseFromCache = ruby_system.network.slave
+ l1_cntrl.forwardToCache = ruby_system.network.master
+ l1_cntrl.responseToCache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+ dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+ dir_cntrl.forwardFromDir = ruby_system.network.slave
+
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
- all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
+        # Connect the dma controller to the network
+ dma_cntrl.requestToDir = ruby_system.network.master
+ dma_cntrl.responseFromDir = ruby_system.network.slave
- topology = create_topology(all_cntrls, options)
+ all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
+ topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromL1Cache = ruby_system.network.slave
+ l1_cntrl.responseFromL1Cache = ruby_system.network.slave
+ l1_cntrl.requestToL1Cache = ruby_system.network.master
+ l1_cntrl.responseToL1Cache = ruby_system.network.master
+
+
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
+ # Connect the L2 controllers and the network
+ l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+ l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
+ l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+ l2_cntrl.responseToL2Cache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
+
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system.
# clk_divider value is a fix to pass regression.
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+ dir_cntrl.forwardFromDir = ruby_system.network.slave
+
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
+
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
-
return (cpu_sequencers, dir_cntrl_nodes, topology)
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromL1Cache = ruby_system.network.slave
+ l1_cntrl.responseFromL1Cache = ruby_system.network.slave
+ l1_cntrl.persistentFromL1Cache = ruby_system.network.slave
+
+ l1_cntrl.requestToL1Cache = ruby_system.network.master
+ l1_cntrl.responseToL1Cache = ruby_system.network.master
+ l1_cntrl.persistentToL1Cache = ruby_system.network.master
+
+
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
+ # Connect the L2 controllers and the network
+ l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+ l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
+ l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+ l2_cntrl.responseToL2Cache = ruby_system.network.master
+ l2_cntrl.persistentToL2Cache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.persistentToDir = ruby_system.network.master
+ dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+ dir_cntrl.requestFromDir = ruby_system.network.slave
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+ dir_cntrl.persistentFromDir = ruby_system.network.slave
+ dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
l1_cntrl.recycle_latency = options.recycle_latency
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
+
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controller and the network
+ # Connect the buffers from the controller to network
+ l1_cntrl.requestFromCache = ruby_system.network.slave
+ l1_cntrl.responseFromCache = ruby_system.network.slave
+ l1_cntrl.unblockFromCache = ruby_system.network.slave
+
+ # Connect the buffers from the network to the controller
+ l1_cntrl.forwardToCache = ruby_system.network.master
+ l1_cntrl.responseToCache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controller to the network
+ dir_cntrl.forwardFromDir = ruby_system.network.slave
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+ dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+
+ dir_cntrl.unblockToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
if options.recycle_latency:
dma_cntrl.recycle_latency = options.recycle_latency
+ # Connect the dma controller to the network
+ dma_cntrl.responseFromDir = ruby_system.network.slave
+ dma_cntrl.requestToDir = ruby_system.network.master
+
+
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
-
return (cpu_sequencers, dir_cntrl_nodes, topology)
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromCache = ruby_system.network.slave
+ l1_cntrl.responseFromCache = ruby_system.network.slave
+ l1_cntrl.forwardFromCache = ruby_system.network.slave
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.forwardToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+
+
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
system.ruby = RubySystem(no_mem_vec = options.use_map)
ruby = system.ruby
- protocol = buildEnv['PROTOCOL']
- exec "import %s" % protocol
- try:
- (cpu_sequencers, dir_cntrls, topology) = \
- eval("%s.create_system(options, system, dma_ports, ruby)"
- % protocol)
- except:
- print "Error: could not create sytem for ruby protocol %s" % protocol
- raise
-
- # Create a port proxy for connecting the system port. This is
- # independent of the protocol and kept in the protocol-agnostic
- # part (i.e. here).
- sys_port_proxy = RubyPortProxy(ruby_system = ruby)
- # Give the system port proxy a SimObject parent without creating a
- # full-fledged controller
- system.sys_port_proxy = sys_port_proxy
-
- # Connect the system port for loading of binaries etc
- system.system_port = system.sys_port_proxy.slave
-
-
- #
# Set the network classes based on the command line options
- #
if options.garnet_network == "fixed":
NetworkClass = GarnetNetwork_d
IntLinkClass = GarnetIntLink_d
RouterClass = Switch
InterfaceClass = None
+ # Instantiate the network object so that the controllers can connect to it.
+ network = NetworkClass(ruby_system = ruby, topology = options.topology,
+ routers = [], ext_links = [], int_links = [], netifs = [])
+ ruby.network = network
+
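Because the controllers now attach their buffers to network.slave and
network.master as they are built, the network object must exist before the
protocol's create_system() call; it can no longer be created afterwards from
topology.description. Condensed, the new ordering is:

    ruby.network = network            # 1. instantiate the network first
    # 2. create_system(): controllers bind their buffers to the network ports
    # 3. topology.makeTopology(): routers and links are added last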
+ protocol = buildEnv['PROTOCOL']
+ exec "import %s" % protocol
+ try:
+ (cpu_sequencers, dir_cntrls, topology) = \
+ eval("%s.create_system(options, system, dma_ports, ruby)"
+ % protocol)
+ except:
+ print "Error: could not create sytem for ruby protocol %s" % protocol
+ raise
+
+ # Create a port proxy for connecting the system port. This is
+ # independent of the protocol and kept in the protocol-agnostic
+ # part (i.e. here).
+ sys_port_proxy = RubyPortProxy(ruby_system = ruby)
+
+ # Give the system port proxy a SimObject parent without creating a
+ # full-fledged controller
+ system.sys_port_proxy = sys_port_proxy
+
+ # Connect the system port for loading of binaries etc
+ system.system_port = system.sys_port_proxy.slave
# Create the network topology
- network = NetworkClass(ruby_system = ruby, topology = topology.description,
- routers = [], ext_links = [], int_links = [], netifs = [])
topology.makeTopology(options, network, IntLinkClass, ExtLinkClass,
RouterClass)
network.enable_fault_model = True
network.fault_model = FaultModel()
- #
    # Loop through the directory controllers.
# Determine the total memory size of the ruby system and verify it is equal
# to physmem. However, if Ruby memory is using sparse memory in SE
# mode, then the system should not back-up the memory state with
# the Memory Vector and thus the memory size bytes should stay at 0.
# Also set the numa bits to the appropriate values.
- #
total_mem_size = MemorySize('0B')
ruby.block_size_bytes = options.cacheline_size
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(total_mem_size.value == phys_mem_size)
-
- ruby.network = network
ruby.mem_size = total_mem_size
# Connect the cpu sequencers and the piobus
Cycles request_latency := 2;
Cycles response_latency := 2;
bool send_evictions;
-{
- // NODE L0 CACHE
- // From this node's L0 cache to the network
- MessageBuffer bufferToL1, network="To", physical_network="0", ordered="true";
- // To this node's L0 cache FROM the network
- MessageBuffer bufferFromL1, network="From", physical_network="0", ordered="true";
+ // From this node's L0 cache to the network
+ MessageBuffer * bufferToL1, network="To", ordered="true";
+ // To this node's L0 cache FROM the network
+ MessageBuffer * bufferFromL1, network="From", ordered="true";
+{
// Message queue between this controller and the processor
MessageBuffer mandatoryQueue, ordered="false";
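With bufferToL1 and bufferFromL1 promoted from members inside the machine
body to MessageBuffer * parameters, the Python config can cross-wire the L0
and L1 ends directly, as the config change earlier in this patch does:

    l0_cntrl.bufferToL1 = l1_cntrl.bufferFromL0    # L0's To side is L1's From side
    l0_cntrl.bufferFromL1 = l1_cntrl.bufferToL0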
Cycles l1_request_latency := 2;
Cycles l1_response_latency := 2;
Cycles to_l2_latency := 1;
-{
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestToL2, network="To", virtual_network="0", ordered="false", vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer responseToL2, network="To", virtual_network="1", ordered="false", vnet_type="response";
- MessageBuffer unblockToL2, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer requestFromL2, network="From", virtual_network="0", ordered="false", vnet_type="request";
- // a L2 bank -> this L1
- MessageBuffer responseFromL2, network="From", virtual_network="1", ordered="false", vnet_type="response";
-
- // Message Buffers between the L1 and the L0 Cache
- // From the L1 cache to the L0 cache
- MessageBuffer bufferToL0, network="To", physical_network="0", ordered="true";
- // From the L0 cache to the L1 cache
- MessageBuffer bufferFromL0, network="From", physical_network="0", ordered="true";
+ // Message Buffers between the L1 and the L0 Cache
+ // From the L1 cache to the L0 cache
+ MessageBuffer * bufferToL0, network="To", ordered="true";
+
+ // From the L0 cache to the L1 cache
+ MessageBuffer * bufferFromL0, network="From", ordered="true";
+
+ // Message queue from this L1 cache TO the network / L2
+ MessageBuffer * requestToL2, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+
+ MessageBuffer * responseToL2, network="To", virtual_network="1",
+ ordered="false", vnet_type="response";
+ MessageBuffer * unblockToL2, network="To", virtual_network="2",
+ ordered="false", vnet_type="unblock";
+
+ // To this L1 cache FROM the network / L2
+ MessageBuffer * requestFromL2, network="From", virtual_network="2",
+ ordered="false", vnet_type="request";
+ MessageBuffer * responseFromL2, network="From", virtual_network="1",
+ ordered="false", vnet_type="response";
+
+{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
Cycles to_l2_latency := 1;
bool send_evictions;
bool enable_prefetch := "False";
+
+ // Message Queues
+ // From this node's L1 cache TO the network
+
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+
+ // a local L1 -> this L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
+ ordered="false", vnet_type="response";
+
+ MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
+ ordered="false", vnet_type="unblock";
+
+
+ // To this node's L1 cache FROM the network
+ // a L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
+ ordered="false", vnet_type="request";
+
+ // a L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="response";
{
- // NODE L1 CACHE
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="response";
- MessageBuffer unblockFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
-
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
- // a L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="response";
// Request Buffer for prefetches
MessageBuffer optionalQueue, ordered="false";
-
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
- *
- */
-
machine(L2Cache, "MESI Directory L2 Cache CMP")
: CacheMemory * L2cache;
Cycles l2_request_latency := 2;
Cycles l2_response_latency := 2;
Cycles to_l1_latency := 1;
-{
- // L2 BANK QUEUES
+
+ // Message Queues
// From local bank of L2 cache TO the network
- MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0",
+ MessageBuffer * DirRequestFromL2Cache, network="To", virtual_network="0",
ordered="false", vnet_type="request"; // this L2 bank -> Memory
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0",
+
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="2",
ordered="false", vnet_type="request"; // this L2 bank -> a local L1
- MessageBuffer responseFromL2Cache, network="To", virtual_network="1",
+
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="1",
ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || Memory
// FROM the network to this local bank of L2 cache
- MessageBuffer unblockToL2Cache, network="From", virtual_network="2",
+ MessageBuffer * unblockToL2Cache, network="From", virtual_network="2",
ordered="false", vnet_type="unblock"; // a local L1 || Memory -> this L2 bank
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0",
+
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
ordered="false", vnet_type="request"; // a local L1 -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="1",
- ordered="false", vnet_type="response"; // a local L1 || Memory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="response"; // a local L1 || Memory -> this L2 bank
+{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
// Base states
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
- */
-
-// This file is copied from Yasuko Watanabe's prefetch / memory protocol
-// Copied here by aep 12/14/07
-
-
machine(Directory, "MESI Two Level directory protocol")
: DirectoryMemory * directory;
MemoryControl * memBuffer;
Cycles to_mem_ctrl_latency := 1;
Cycles directory_latency := 6;
-{
- MessageBuffer requestToDir, network="From", virtual_network="0",
+
+ MessageBuffer * requestToDir, network="From", virtual_network="0",
ordered="false", vnet_type="request";
- MessageBuffer responseToDir, network="From", virtual_network="1",
+ MessageBuffer * responseToDir, network="From", virtual_network="1",
ordered="false", vnet_type="response";
- MessageBuffer responseFromDir, network="To", virtual_network="1",
+ MessageBuffer * responseFromDir, network="To", virtual_network="1",
ordered="false", vnet_type="response";
-
+{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer;
Cycles request_latency := 6;
-{
-
- MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
- MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ ordered="true", vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+{
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
error("DMA does not support get data block.");
}
- out_port(reqToDirectory_out, RequestMsg, reqToDirectory, desc="...");
+ out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, RequestMsg, request_latency) {
+ enqueue(requestToDir_out, RequestMsg, request_latency) {
out_msg.Addr := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_READ;
out_msg.DataBlk := in_msg.DataBlk;
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, RequestMsg, request_latency) {
+ enqueue(requestToDir_out, RequestMsg, request_latency) {
out_msg.Addr := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_WRITE;
out_msg.DataBlk := in_msg.DataBlk;
*/
machine(L1Cache, "MI Example L1 Cache")
-: Sequencer * sequencer;
- CacheMemory * cacheMemory;
- Cycles cache_response_latency := 12;
- Cycles issue_latency := 2;
- bool send_evictions;
+ : Sequencer * sequencer;
+ CacheMemory * cacheMemory;
+ Cycles cache_response_latency := 12;
+ Cycles issue_latency := 2;
+ bool send_evictions;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="2",
+ ordered="true", vnet_type="request";
+ MessageBuffer * responseFromCache, network="To", virtual_network="4",
+ ordered="true", vnet_type="response";
+
+ MessageBuffer * forwardToCache, network="From", virtual_network="3",
+ ordered="true", vnet_type="forward";
+ MessageBuffer * responseToCache, network="From", virtual_network="4",
+ ordered="true", vnet_type="response";
{
-
- // NETWORK BUFFERS
- MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
- MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";
-
- MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
- MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";
-
// STATES
state_declaration(State, desc="Cache states") {
I, AccessPermission:Invalid, desc="Not Present/Invalid";
*/
machine(Directory, "Directory protocol")
-: DirectoryMemory * directory;
- MemoryControl * memBuffer;
- Cycles directory_latency := 12;
+ : DirectoryMemory * directory;
+ MemoryControl * memBuffer;
+ Cycles directory_latency := 12;
+
+ MessageBuffer * forwardFromDir, network="To", virtual_network="3",
+ ordered="false", vnet_type="forward";
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
+ ordered="true", vnet_type="response";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ ordered="true", vnet_type="request";
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ ordered="true", vnet_type="request";
{
-
- MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
- MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
-
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true", vnet_type="request";
- MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
-
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
*/
machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-{
-
- MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
- MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ ordered="true", vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+{
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
error("DMA Controller does not support getDataBlock function.\n");
}
- out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
+ out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
Cycles request_latency := 2;
Cycles use_timeout_latency := 50;
bool send_evictions;
-{
-
- // NODE L1 CACHE
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";
-// MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false", vnet_type="writeback";
-
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
- // a L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";
-
-
+ // Message Queues
+ // From this node's L1 cache TO the network
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+ // a local L1 -> this L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
+ ordered="false", vnet_type="response";
+
+ // To this node's L1 cache FROM the network
+ // a L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
+ ordered="false", vnet_type="request";
+ // a L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
+ ordered="false", vnet_type="response";
+{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
: CacheMemory * L2cache;
Cycles response_latency := 2;
Cycles request_latency := 2;
-{
// L2 BANK QUEUES
// From local bank of L2 cache TO the network
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false", vnet_type="request"; // this L2 bank -> a local L1
- MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request"; // this L2 bank -> mod-directory
- MessageBuffer responseFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || mod-directory
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="0",
+ ordered="false", vnet_type="request"; // this L2 bank -> a local L1
+ MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="1",
+ ordered="false", vnet_type="request"; // this L2 bank -> mod-directory
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="2",
+ ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || mod-directory
// FROM the network to this local bank of L2 cache
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false", vnet_type="request"; // a local L1 -> this L2 bank, Lets try this???
- MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request"; // mod-directory -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="response"; // a local L1 || mod-directory -> this L2 bank
-// MessageBuffer L1WritebackToL2Cache, network="From", virtual_network="3", ordered="false", vnet_type="writeback";
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
+ ordered="false", vnet_type="request"; // a local L1 -> this L2 bank, Lets try this???
+ MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="request"; // mod-directory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="2",
+ ordered="false", vnet_type="response"; // a local L1 || mod-directory -> this L2 bank
+{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
: DirectoryMemory * directory;
MemoryControl * memBuffer;
Cycles directory_latency := 6;
-{
-
- // ** IN QUEUES **
- MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false", vnet_type="request"; // a mod-L2 bank -> this Dir
- MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type="response"; // a mod-L2 bank -> this Dir
- MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false", vnet_type="forward";
- MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false", vnet_type="response"; // Dir -> mod-L2 bank
+ // Message Queues
+ MessageBuffer * requestToDir, network="From", virtual_network="1",
+ ordered="false", vnet_type="request"; // a mod-L2 bank -> this Dir
+ MessageBuffer * responseToDir, network="From", virtual_network="2",
+ ordered="false", vnet_type="response"; // a mod-L2 bank -> this Dir
+ MessageBuffer * forwardFromDir, network="To", virtual_network="1",
+ ordered="false", vnet_type="forward";
+ MessageBuffer * responseFromDir, network="To", virtual_network="2",
+ ordered="false", vnet_type="response"; // Dir -> mod-L2 bank
+{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
- Cycles request_latency := 14;
- Cycles response_latency := 14;
-{
- MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";
+machine(DMA, "DMA Controller")
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 14;
+ Cycles response_latency := 14;
+
+ MessageBuffer * responseFromDir, network="From", virtual_network="2",
+ ordered="false", vnet_type="response";
+
+ MessageBuffer * reqToDir, network="To", virtual_network="1",
+ ordered="false", vnet_type="request";
+ MessageBuffer * respToDir, network="To", virtual_network="2",
+ ordered="false", vnet_type="dmaresponse";
- MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
- MessageBuffer respToDir, network="To", virtual_network="2", ordered="false", vnet_type="dmaresponse";
-
+{
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
}
-
+
enumeration(Event, desc="DMA events") {
ReadRequest, desc="A new read request";
WriteRequest, desc="A new write request";
}
transition(BUSY_WR, All_Acks, READY) {
- a_ackCallback;
+ a_ackCallback;
u_sendExclusiveUnblockToDir;
w_deallocateTBE;
p_popTriggerQueue;
bool dynamic_timeout_enabled := "True";
bool no_mig_atomic := "True";
bool send_evictions;
-{
-
- // From this node's L1 cache TO the network
-
- // a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
+ // Message Queues
+ // From this node's L1 cache TO the network
+
+ // a local L1 -> this L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * persistentFromL1Cache, network="To", virtual_network="3",
+ ordered="true", vnet_type="persistent";
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="1",
+ ordered="false", vnet_type="request";
+
+
+ // To this node's L1 cache FROM the network
+
+ // a L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * persistentToL1Cache, network="From", virtual_network="3",
+ ordered="true", vnet_type="persistent";
+ // a L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="request";
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
- // a L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
-
+{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
Cycles l2_request_latency := 5;
Cycles l2_response_latency := 5;
bool filtering_enabled := "True";
-{
-
- // L2 BANK QUEUES
- // From local bank of L2 cache TO the network
-
- // this L2 bank -> a local L1 || mod-directory
- MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
- // this L2 bank -> mod-directory
- MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="request";
- // this L2 bank -> a local L1
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+
+ // this L2 bank -> a local L1 || mod-directory
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+ // this L2 bank -> mod-directory
+ MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
+ ordered="false", vnet_type="request";
+ // this L2 bank -> a local L1
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
+ ordered="false", vnet_type="request";
+
+
+ // FROM the network to this local bank of L2 cache
+
+ // a local L1 || mod-directory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
+ ordered="true", vnet_type="persistent";
+ // mod-directory -> this L2 bank
+ MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
+ ordered="false", vnet_type="request";
+ // a local L1 -> this L2 bank
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="request";
- // FROM the network to this local bank of L2 cache
-
- // a local L1 || mod-directory -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
- // mod-directory -> this L2 bank
- MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
-
+{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
// Base states
bool distributed_persistent := "True";
Cycles fixed_timeout_latency := 100;
Cycles reissue_wakeup_latency := 10;
-{
- MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true", vnet_type="response";
- MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
- MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
+ // Message Queues from dir to other controllers / network
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
+ ordered="true", vnet_type="response";
+
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+
+ MessageBuffer * persistentFromDir, network="To", virtual_network="3",
+ ordered="true", vnet_type="persistent";
- MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request";
- MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
+ MessageBuffer * requestFromDir, network="To", virtual_network="1",
+ ordered="false", vnet_type="request";
+
+ // Message Queues to dir from other controllers / network
+ MessageBuffer * responseToDir, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * persistentToDir, network="From", virtual_network="3",
+ ordered="true", vnet_type="persistent";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ ordered="false", vnet_type="request";
+
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ ordered="true", vnet_type="request";
+
+{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_O") {
// Base states
machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-{
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
- MessageBuffer responseFromDir, network="From", virtual_network="5", ordered="true", vnet_type="response";
- MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+      // Message Queues
+ MessageBuffer * responseFromDir, network="From", virtual_network="5",
+ ordered="true", vnet_type="response";
+ MessageBuffer * reqToDirectory, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+{
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
*/
machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
-: Sequencer * sequencer;
- CacheMemory * L1Icache;
- CacheMemory * L1Dcache;
- CacheMemory * L2cache;
- Cycles cache_response_latency := 10;
- Cycles issue_latency := 2;
- Cycles l2_cache_hit_latency := 10;
- bool no_mig_atomic := "True";
- bool send_evictions;
+ : Sequencer * sequencer;
+ CacheMemory * L1Icache;
+ CacheMemory * L1Dcache;
+ CacheMemory * L2cache;
+ Cycles cache_response_latency := 10;
+ Cycles issue_latency := 2;
+ Cycles l2_cache_hit_latency := 10;
+ bool no_mig_atomic := "True";
+ bool send_evictions;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="2",
+ ordered="false", vnet_type="request";
+ MessageBuffer * responseFromCache, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * unblockFromCache, network="To", virtual_network="5",
+ ordered="false", vnet_type="unblock";
+
+ MessageBuffer * forwardToCache, network="From", virtual_network="3",
+ ordered="false", vnet_type="forward";
+ MessageBuffer * responseToCache, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
{
-
- // NETWORK BUFFERS
- MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
- MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";
-
- MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
- MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";
-
-
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
*/
machine(Directory, "AMD Hammer-like protocol")
-: DirectoryMemory * directory;
- CacheMemory * probeFilter;
- MemoryControl * memBuffer;
- Cycles memory_controller_latency := 2;
- bool probe_filter_enabled := "False";
- bool full_bit_dir_enabled := "False";
-{
+ : DirectoryMemory * directory;
+ CacheMemory * probeFilter;
+ MemoryControl * memBuffer;
+ Cycles memory_controller_latency := 2;
+ bool probe_filter_enabled := "False";
+ bool full_bit_dir_enabled := "False";
- MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
- MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
- //
- // For a finite buffered network, note that the DMA response network only
- // works at this relatively lower numbered (lower priority) virtual network
- // because the trigger queue decouples cache responses from DMA responses.
- //
- MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
+ MessageBuffer * forwardFromDir, network="To", virtual_network="3",
+ ordered="false", vnet_type="forward";
+
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
- MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false", vnet_type="unblock";
- MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request", recycle_latency="1";
- MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
+ // For a finite buffered network, note that the DMA response network only
+ // works at this relatively lower numbered (lower priority) virtual network
+ // because the trigger queue decouples cache responses from DMA responses.
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
+ ordered="true", vnet_type="response";
+ MessageBuffer * unblockToDir, network="From", virtual_network="5",
+ ordered="false", vnet_type="unblock";
+
+ MessageBuffer * responseToDir, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ ordered="false", vnet_type="request", recycle_latency="1";
+
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ ordered="true", vnet_type="request";
+{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_E") {
// Base states
machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-{
-
- MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
- MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
- state_declaration(State,
- desc="DMA states",
- default="DMA_State_READY") {
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ ordered="true", vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+{
+ state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
error("DMA Controller does not support getDataBlock function.\n");
}
- out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
+ out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
machine(L1Cache, "Network_test L1 Cache")
-: Sequencer * sequencer;
- Cycles issue_latency := 2;
+ : Sequencer * sequencer;
+ Cycles issue_latency := 2;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="0",
+ ordered="false", vnet_type = "request";
+ MessageBuffer * forwardFromCache, network="To", virtual_network="1",
+ ordered="false", vnet_type = "forward";
+ MessageBuffer * responseFromCache, network="To", virtual_network="2",
+ ordered="false", vnet_type = "response";
{
-
- // NETWORK BUFFERS
- MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false", vnet_type = "request";
- MessageBuffer forwardFromCache, network="To", virtual_network="1", ordered="false", vnet_type = "forward";
- MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false", vnet_type = "response";
-
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
I, AccessPermission:Invalid, desc="Not Present/Invalid";
machine(Directory, "Network_test Directory")
-:
+ : MessageBuffer * requestToDir, network="From", virtual_network="0",
+ ordered="false", vnet_type = "request";
+ MessageBuffer * forwardToDir, network="From", virtual_network="1",
+ ordered="false", vnet_type = "forward";
+ MessageBuffer * responseToDir, network="From", virtual_network="2",
+ ordered="false", vnet_type = "response";
{
-
- MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type = "request";
- MessageBuffer forwardToDir, network="From", virtual_network="1", ordered="false", vnet_type = "forward";
- MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type = "response";
-
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
// Queues that are feeding the protocol
m_fromNetQueues.resize(m_nodes);
- for (int node = 0; node < m_nodes; node++) {
- // Setting number of virtual message buffers per Network Queue
- m_toNetQueues[node].resize(m_virtual_networks);
- m_fromNetQueues[node].resize(m_virtual_networks);
-
- // Instantiating the Message Buffers that
- // interact with the coherence protocol
- for (int j = 0; j < m_virtual_networks; j++) {
- m_toNetQueues[node][j] = new MessageBuffer();
- m_fromNetQueues[node][j] = new MessageBuffer();
- }
- }
-
m_in_use.resize(m_virtual_networks);
m_ordered.resize(m_virtual_networks);
Network::~Network()
{
for (int node = 0; node < m_nodes; node++) {
+
// Delete the Message Buffers
- for (int j = 0; j < m_virtual_networks; j++) {
- delete m_toNetQueues[node][j];
- delete m_fromNetQueues[node][j];
+ for (auto& it : m_toNetQueues[node]) {
+ delete it.second;
+ }
+
+ for (auto& it : m_fromNetQueues[node]) {
+ delete it.second;
}
}
static uint32_t MessageSizeType_to_int(MessageSizeType size_type);
// returns the queue requested for the given component
- virtual MessageBuffer* getToNetQueue(NodeID id, bool ordered,
- int netNumber, std::string vnet_type) = 0;
- virtual MessageBuffer* getFromNetQueue(NodeID id, bool ordered,
- int netNumber, std::string vnet_type) = 0;
-
+ virtual void setToNetQueue(NodeID id, bool ordered, int netNumber,
+ std::string vnet_type, MessageBuffer *b) = 0;
+ virtual void setFromNetQueue(NodeID id, bool ordered, int netNumber,
+ std::string vnet_type, MessageBuffer *b) = 0;
virtual void makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
LinkDirection direction,
static uint32_t m_data_msg_size;
// vector of queues from the components
- std::vector<std::vector<MessageBuffer*> > m_toNetQueues;
- std::vector<std::vector<MessageBuffer*> > m_fromNetQueues;
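+    // Indexed by node, then keyed by virtual network number. The maps may be
+    // sparse: a protocol connects only the vnets it actually uses.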
+ std::vector<std::map<int, MessageBuffer*> > m_toNetQueues;
+ std::vector<std::map<int, MessageBuffer*> > m_fromNetQueues;
std::vector<bool> m_in_use;
std::vector<bool> m_ordered;
# Brad Beckmann
from m5.params import *
-from m5.SimObject import SimObject
from ClockedObject import ClockedObject
from BasicLink import BasicLink
netifs = VectorParam.ClockedObject("Network Interfaces")
ext_links = VectorParam.BasicExtLink("Links to external nodes")
int_links = VectorParam.BasicIntLink("Links between internal nodes")
+
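+    # Vector ports that the controllers connect their message buffers to.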
+ slave = VectorSlavePort("CPU slave port")
+ master = VectorMasterPort("CPU master port")
Network::init();
}
-MessageBuffer*
-BaseGarnetNetwork::getToNetQueue(NodeID id, bool ordered, int network_num,
- string vnet_type)
+void
+BaseGarnetNetwork::setToNetQueue(NodeID id, bool ordered, int network_num,
+ string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num, vnet_type);
- return m_toNetQueues[id][network_num];
+ m_toNetQueues[id][network_num] = b;
}
-MessageBuffer*
-BaseGarnetNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,
- string vnet_type)
+void
+BaseGarnetNetwork::setFromNetQueue(NodeID id, bool ordered, int network_num,
+ string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num, vnet_type);
- return m_fromNetQueues[id][network_num];
+ m_fromNetQueues[id][network_num] = b;
}
void
m_queueing_latency[vnet] += latency;
}
- // returns the queue requested for the given component
- MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num,
- std::string vnet_type);
- MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num,
- std::string vnet_type);
-
+ // set the queue
+ void setToNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b);
+ void setFromNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b);
bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
m_vc_round_robin = 0;
m_ni_buffers.resize(m_num_vcs);
m_ni_enqueue_time.resize(m_num_vcs);
- inNode_ptr.resize(m_virtual_networks);
- outNode_ptr.resize(m_virtual_networks);
creditQueue = new flitBuffer_d();
// instantiating the NI flit buffers
}
void
-NetworkInterface_d::addNode(vector<MessageBuffer *>& in,
- vector<MessageBuffer *>& out)
+NetworkInterface_d::addNode(map<int, MessageBuffer *>& in,
+ map<int, MessageBuffer *>& out)
{
- assert(in.size() == m_virtual_networks);
inNode_ptr = in;
outNode_ptr = out;
- for (int j = 0; j < m_virtual_networks; j++) {
+ for (auto& it : in) {
// the protocol injects messages into the NI
- inNode_ptr[j]->setConsumer(this);
- inNode_ptr[j]->setReceiver(this);
- outNode_ptr[j]->setSender(this);
+ it.second->setConsumer(this);
+ it.second->setReceiver(this);
+ }
+
+ for (auto& it : out) {
+ it.second->setSender(this);
}
}
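
Range-for over these maps yields std::pair<const int, MessageBuffer*> elements, so it.first is the vnet number and it.second the buffer; splitting the registration into separate loops lets the in and out maps differ in which vnets they populate. A tiny illustration of the pair shape (values hypothetical):

#include <cassert>
#include <map>

int main()
{
    std::map<int, const char*> in;
    in[2] = "buf-for-vnet-2";

    for (auto& it : in) {
        assert(it.first == 2);         // key: virtual network number
        assert(it.second[0] == 'b');   // value: the buffer pointer
    }
    return 0;
}
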
// Checking for messages coming from the protocol
// can pick up a message/cycle for each virtual net
- for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
- while (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
- msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
+ for (auto it = inNode_ptr.begin(); it != inNode_ptr.end(); ++it) {
+ int vnet = it->first;
+ MessageBuffer *b = it->second;
+
+ while (b->isReady()) { // Is there a message waiting
+ msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
- inNode_ptr[vnet]->dequeue();
+ b->dequeue();
} else {
break;
}
void
NetworkInterface_d::checkReschedule()
{
- for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
- if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
+ for (const auto& it : inNode_ptr) {
+ MessageBuffer *b = it.second;
+
+ if (b->isReady()) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
}
+
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));
void addOutPort(NetworkLink_d *out_link, CreditLink_d *credit_link);
void wakeup();
- void addNode(std::vector<MessageBuffer *> &inNode,
- std::vector<MessageBuffer *> &outNode);
+ void addNode(std::map<int, MessageBuffer *> &inNode,
+ std::map<int, MessageBuffer *> &outNode);
+
void print(std::ostream& out) const;
int get_vnet(int vc);
void init_net_ptr(GarnetNetwork_d *net_ptr) { m_net_ptr = net_ptr; }
std::vector<Cycles> m_ni_enqueue_time;
// The message buffers that take messages from the protocol
- std::vector<MessageBuffer *> inNode_ptr;
+ std::map<int, MessageBuffer *> inNode_ptr;
// The message buffers that provide messages to the protocol
- std::vector<MessageBuffer *> outNode_ptr;
+ std::map<int, MessageBuffer *> outNode_ptr;
bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
int calculateVC(int vnet);
m_virtual_networks = p->virt_nets;
m_vc_per_vnet = p->vcs_per_vnet;
m_num_vcs = m_vc_per_vnet*m_virtual_networks;
-
m_vc_round_robin = 0;
- m_ni_buffers.resize(m_num_vcs);
- inNode_ptr.resize(m_virtual_networks);
- outNode_ptr.resize(m_virtual_networks);
// instantiating the NI flit buffers
+ m_ni_buffers.resize(m_num_vcs);
for (int i =0; i < m_num_vcs; i++)
m_ni_buffers[i] = new flitBuffer();
}
void
-NetworkInterface::addNode(vector<MessageBuffer*>& in,
- vector<MessageBuffer*>& out)
+NetworkInterface::addNode(map<int, MessageBuffer*>& in,
+ map<int, MessageBuffer*>& out)
{
- assert(in.size() == m_virtual_networks);
inNode_ptr = in;
outNode_ptr = out;
- // protocol injects messages into the NI
- for (int j = 0; j < m_virtual_networks; j++) {
- inNode_ptr[j]->setConsumer(this);
- inNode_ptr[j]->setReceiver(this);
- outNode_ptr[j]->setSender(this);
+ for (auto& it : in) {
+ // the protocol injects messages into the NI
+ it.second->setConsumer(this);
+ it.second->setReceiver(this);
+ }
+
+ for (auto& it : out) {
+ it.second->setSender(this);
}
}
//Checking for messages coming from the protocol
// can pick up a message/cycle for each virtual net
- for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
- while (inNode_ptr[vnet]->isReady()) // Is there a message waiting
- {
- msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
+ for (auto it = inNode_ptr.begin(); it != inNode_ptr.end(); ++it) {
+ int vnet = it->first;
+ MessageBuffer *b = it->second;
+
+ while (b->isReady()) { // Is there a message waiting
+ msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
- inNode_ptr[vnet]->dequeue();
+ b->dequeue();
} else {
break;
}
void
NetworkInterface::checkReschedule()
{
- for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
- if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
+ for (const auto& it : inNode_ptr) {
+ MessageBuffer *b = it.second;
+
+ if (b->isReady()) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
}
+
for (int vc = 0; vc < m_num_vcs; vc++) {
- if (m_ni_buffers[vc]->isReadyForNext(curCycle())) {
+ if (m_ni_buffers[vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));
return;
}
void addInPort(NetworkLink *in_link);
void addOutPort(NetworkLink *out_link);
+ void addNode(std::map<int, MessageBuffer *> &inNode,
+ std::map<int, MessageBuffer *> &outNode);
void wakeup();
- void addNode(std::vector<MessageBuffer *> &inNode,
- std::vector<MessageBuffer *> &outNode);
void grant_vc(int out_port, int vc, Cycles grant_time);
void release_vc(int out_port, int vc, Cycles release_time);
std::vector<flitBuffer *> m_ni_buffers;
// The message buffers that take messages from the protocol
- std::vector<MessageBuffer *> inNode_ptr;
+ std::map<int, MessageBuffer *> inNode_ptr;
// The message buffers that provide messages to the protocol
- std::vector<MessageBuffer *> outNode_ptr;
+ std::map<int, MessageBuffer *> outNode_ptr;
bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
int calculateVC(int vnet);
{
for (int port = 0; port < m_out_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
- if (m_router_buffers[port][vc]->isReadyForNext(curCycle())) {
+ if (m_router_buffers[port][vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));
return;
}
return false;
}
-bool
-flitBuffer::isReadyForNext(Cycles curTime)
-{
- if (m_buffer.size() != 0 ) {
- flit *t_flit = m_buffer.front();
- if (t_flit->get_time() <= (curTime + 1))
- return true;
- }
- return false;
-}
-
bool
flitBuffer::isFull()
{
flitBuffer(int maximum_size);
bool isReady(Cycles curTime);
- bool isReadyForNext(Cycles curTime);
bool isFull();
bool isEmpty();
void setMaxSize(int maximum);
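
isReadyForNext(t) duplicated isReady() with the comparison shifted by one cycle; since "head-flit time <= t + 1" is exactly what isReady(t + Cycles(1)) tests, the callers above switch to that form and the helper is dropped. A sketch of the equivalence on a simplified flit model (types illustrative):

#include <cassert>
#include <deque>

typedef unsigned Cycles;

struct Flit { Cycles time; };

struct FlitBuffer {
    std::deque<Flit> buffer;
    bool isReady(Cycles curTime) const
    { return !buffer.empty() && buffer.front().time <= curTime; }
};

int main()
{
    FlitBuffer fb;
    Flit f; f.time = 5;
    fb.buffer.push_back(f);

    // old call: isReadyForNext(4) -> head time 5 <= 4 + 1 -> true
    assert(fb.isReady(4 + 1));
    // and it stays false one cycle earlier, as before
    assert(!fb.isReady(3 + 1));
    return 0;
}
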
{
m_network_ptr = network_ptr;
- for(int i = 0;i < m_virtual_networks;++i)
- {
+ for (int i = 0; i < m_virtual_networks; ++i) {
m_pending_message_count.push_back(0);
}
}
void
-PerfectSwitch::addInPort(const vector<MessageBuffer*>& in)
+PerfectSwitch::addInPort(const map<int, MessageBuffer*>& in)
{
- assert(in.size() == m_virtual_networks);
NodeID port = m_in.size();
m_in.push_back(in);
- for (int j = 0; j < m_virtual_networks; j++) {
- m_in[port][j]->setConsumer(this);
+ for (auto& it : in) {
+ it.second->setConsumer(this);
string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
- to_string(m_switch_id), to_string(port), to_string(j));
- m_in[port][j]->setDescription(desc);
- m_in[port][j]->setIncomingLink(port);
- m_in[port][j]->setVnet(j);
+ to_string(m_switch_id), to_string(port), to_string(it.first));
+
+ it.second->setDescription(desc);
+ it.second->setIncomingLink(port);
+ it.second->setVnet(it.first);
}
}
void
-PerfectSwitch::addOutPort(const vector<MessageBuffer*>& out,
+PerfectSwitch::addOutPort(const map<int, MessageBuffer*>& out,
const NetDest& routing_table_entry)
{
- assert(out.size() == m_virtual_networks);
-
// Setup link order
LinkOrder l;
l.m_value = 0;
vector<NetDest> output_link_destinations;
// Is there a message waiting?
- while (m_in[incoming][vnet]->isReady()) {
+ auto it = m_in[incoming].find(vnet);
+ if (it == m_in[incoming].end())
+ continue;
+ MessageBuffer *buffer = it->second;
+
+ while (buffer->isReady()) {
DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
// Peek at message
- msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
+ msg_ptr = buffer->peekMsgPtr();
net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
}
// Dequeue msg
- m_in[incoming][vnet]->dequeue();
+ buffer->dequeue();
m_pending_message_count[vnet]--;
// Enqueue it - for all outgoing queues
{ return csprintf("PerfectSwitch-%i", m_switch_id); }
void init(SimpleNetwork *);
- void addInPort(const std::vector<MessageBuffer*>& in);
- void addOutPort(const std::vector<MessageBuffer*>& out,
+ void addInPort(const std::map<int, MessageBuffer*>& in);
+ void addOutPort(const std::map<int, MessageBuffer*>& out,
const NetDest& routing_table_entry);
+
int getInLinks() const { return m_in.size(); }
int getOutLinks() const { return m_out.size(); }
SwitchID m_switch_id;
// queues from the components, indexed by port and keyed by vnet
- std::vector<std::vector<MessageBuffer*> > m_in;
- std::vector<std::vector<MessageBuffer*> > m_out;
+ std::vector<std::map<int, MessageBuffer*> > m_in;
+ std::vector<std::map<int, MessageBuffer*> > m_out;
+
std::vector<NetDest> m_routing_table;
std::vector<LinkOrder> m_link_order;
SimpleExtLink *simple_link = safe_cast<SimpleExtLink*>(link);
- m_switches[src]->addOutPort(m_fromNetQueues[dest],
- routing_table_entry,
- simple_link->m_latency,
- simple_link->m_bw_multiplier);
+ m_switches[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry,
+ simple_link->m_latency,
+ simple_link->m_bw_multiplier);
m_endpoint_switches[dest] = m_switches[src];
}
const NetDest& routing_table_entry)
{
// Create a set of new MessageBuffers
- std::vector<MessageBuffer*> queues;
+ std::map<int, MessageBuffer*> queues;
for (int i = 0; i < m_virtual_networks; i++) {
// allocate a buffer
MessageBuffer* buffer_ptr = new MessageBuffer;
buffer_ptr->setOrdering(true);
+
if (m_buffer_size > 0) {
buffer_ptr->resize(m_buffer_size);
}
- queues.push_back(buffer_ptr);
+
+ queues[i] = buffer_ptr;
// remember to deallocate it
m_buffers_to_free.push_back(buffer_ptr);
}
+
// Connect it to the two switches
SimpleIntLink *simple_link = safe_cast<SimpleIntLink*>(link);
m_switches[dest]->addInPort(queues);
m_switches[src]->addOutPort(queues, routing_table_entry,
- simple_link->m_latency,
- simple_link->m_bw_multiplier);
+ simple_link->m_latency,
+ simple_link->m_bw_multiplier);
}
void
m_in_use[network_num] = true;
}
-MessageBuffer*
-SimpleNetwork::getToNetQueue(NodeID id, bool ordered, int network_num,
- std::string vnet_type)
+void
+SimpleNetwork::setToNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num);
- return m_toNetQueues[id][network_num];
+ m_toNetQueues[id][network_num] = b;
}
-MessageBuffer*
-SimpleNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,
- std::string vnet_type)
+void
+SimpleNetwork::setFromNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num);
- return m_fromNetQueues[id][network_num];
+ m_fromNetQueues[id][network_num] = b;
}
void
void collateStats();
void regStats();
- // returns the queue requested for the given component
- MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num, std::string vnet_type);
- MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num, std::string vnet_type);
+ // sets the queue for the given component
+ void setToNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b);
+ void setFromNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b);
bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
// Private copy constructor and assignment operator
SimpleNetwork(const SimpleNetwork& obj);
SimpleNetwork& operator=(const SimpleNetwork& obj);
+
std::vector<Switch*> m_switches;
std::vector<MessageBuffer*> m_buffers_to_free;
std::vector<Switch*> m_endpoint_switches;
}
void
-Switch::addInPort(const vector<MessageBuffer*>& in)
+Switch::addInPort(const map<int, MessageBuffer*>& in)
{
m_perfect_switch->addInPort(in);
- for (int i = 0; i < in.size(); i++) {
- in[i]->setReceiver(this);
+ for (auto& it : in) {
+ it.second->setReceiver(this);
}
}
void
-Switch::addOutPort(const vector<MessageBuffer*>& out,
- const NetDest& routing_table_entry, Cycles link_latency, int bw_multiplier)
+Switch::addOutPort(const map<int, MessageBuffer*>& out,
+ const NetDest& routing_table_entry,
+ Cycles link_latency, int bw_multiplier)
{
// Create a throttle
Throttle* throttle_ptr = new Throttle(m_id, m_throttles.size(),
- link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
- this);
+ link_latency, bw_multiplier,
+ m_network_ptr->getEndpointBandwidth(),
+ this);
+
m_throttles.push_back(throttle_ptr);
// Create one buffer per vnet (these are intermediaryQueues)
- vector<MessageBuffer*> intermediateBuffers;
- for (int i = 0; i < out.size(); i++) {
- out[i]->setSender(this);
+ map<int, MessageBuffer*> intermediateBuffers;
+
+ for (auto& it : out) {
+ it.second->setSender(this);
MessageBuffer* buffer_ptr = new MessageBuffer;
// Make these queues ordered
buffer_ptr->resize(m_network_ptr->getBufferSize());
}
- intermediateBuffers.push_back(buffer_ptr);
+ intermediateBuffers[it.first] = buffer_ptr;
m_buffers_to_free.push_back(buffer_ptr);
buffer_ptr->setSender(this);
typedef SwitchParams Params;
Switch(const Params *p);
~Switch();
-
void init();
- void addInPort(const std::vector<MessageBuffer*>& in);
- void addOutPort(const std::vector<MessageBuffer*>& out,
- const NetDest& routing_table_entry, Cycles link_latency,
- int bw_multiplier);
+
+ void addInPort(const std::map<int, MessageBuffer*>& in);
+ void addOutPort(const std::map<int, MessageBuffer*>& out,
+ const NetDest& routing_table_entry,
+ Cycles link_latency, int bw_multiplier);
+
const Throttle* getThrottle(LinkID link_number) const;
void resetStats();
int link_bandwidth_multiplier, int endpoint_bandwidth)
{
m_node = node;
- m_vnets = 0;
-
assert(link_bandwidth_multiplier > 0);
m_link_bandwidth_multiplier = link_bandwidth_multiplier;
+
m_link_latency = link_latency;
m_endpoint_bandwidth = endpoint_bandwidth;
m_wakeups_wo_switch = 0;
-
m_link_utilization_proxy = 0;
}
void
-Throttle::addLinks(const std::vector<MessageBuffer*>& in_vec,
- const std::vector<MessageBuffer*>& out_vec)
+Throttle::addLinks(const map<int, MessageBuffer*>& in_vec,
+ const map<int, MessageBuffer*>& out_vec)
{
assert(in_vec.size() == out_vec.size());
- for (int i=0; i<in_vec.size(); i++) {
- addVirtualNetwork(in_vec[i], out_vec[i]);
+
+ for (auto& it : in_vec) {
+ int vnet = it.first;
+
+ auto jt = out_vec.find(vnet);
+ assert(jt != out_vec.end());
+
+ MessageBuffer *in_ptr = it.second;
+ MessageBuffer *out_ptr = jt->second;
+
+ m_in[vnet] = in_ptr;
+ m_out[vnet] = out_ptr;
+ m_units_remaining[vnet] = 0;
+
+ // Set consumer and description
+ in_ptr->setConsumer(this);
+ string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
+ to_string(m_node) + "]";
+ in_ptr->setDescription(desc);
}
}
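
With maps, addLinks can no longer pair input and output buffers by position; it pairs them by vnet key and asserts that every input vnet has a matching output. A reduced sketch of that pairing check, with strings standing in for buffers (names illustrative):

#include <cassert>
#include <map>
#include <string>

int main()
{
    std::map<int, std::string> in_vec;
    std::map<int, std::string> out_vec;
    in_vec[0] = "in0";   in_vec[3] = "in3";
    out_vec[0] = "out0"; out_vec[3] = "out3";

    assert(in_vec.size() == out_vec.size());
    for (auto& it : in_vec) {
        // every input vnet must have an output buffer under the same key
        auto jt = out_vec.find(it.first);
        assert(jt != out_vec.end());
    }
    return 0;
}
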
void
-Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
+Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
+ MessageBuffer *in, MessageBuffer *out)
{
- m_units_remaining.push_back(0);
- m_in.push_back(in_ptr);
- m_out.push_back(out_ptr);
+ assert(out != NULL);
+ assert(in != NULL);
+ assert(m_units_remaining[vnet] >= 0);
+
+ while (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
+ out->areNSlotsAvailable(1)) {
+
+ // See if we are done transferring the previous message on
+ // this virtual network
+ if (m_units_remaining[vnet] == 0 && in->isReady()) {
+ // Find the size of the message we are moving
+ MsgPtr msg_ptr = in->peekMsgPtr();
+ NetworkMessage* net_msg_ptr =
+ safe_cast<NetworkMessage*>(msg_ptr.get());
+ m_units_remaining[vnet] +=
+ network_message_to_size(net_msg_ptr);
+
+ DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
+ "enqueueing net msg %d time: %lld.\n",
+ m_node, getLinkBandwidth(), m_units_remaining[vnet],
+ g_system_ptr->curCycle());
+
+ // Move the message
+ in->dequeue();
+ out->enqueue(msg_ptr, m_link_latency);
+
+ // Count the message
+ m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
+ DPRINTF(RubyNetwork, "%s\n", *out);
+ }
+
+ // Calculate the amount of bandwidth we spent on this message
+ int diff = m_units_remaining[vnet] - bw_remaining;
+ m_units_remaining[vnet] = max(0, diff);
+ bw_remaining = max(0, -diff);
+ }
- // Set consumer and description
- m_in[m_vnets]->setConsumer(this);
+ if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
+ !out->areNSlotsAvailable(1)) {
+ DPRINTF(RubyNetwork, "vnet: %d\n", vnet);
- string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
- to_string(m_node) + "]";
- m_in[m_vnets]->setDescription(desc);
- m_vnets++;
+ // schedule me to wakeup again because I'm waiting for my
+ // output queue to become available
+ schedule_wakeup = true;
+ }
}
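
The diff/max arithmetic at the end of the transfer loop splits the message's remaining size against the remaining link bandwidth: whichever is larger survives and the other drops to zero. A worked example, assuming a 16-unit message and a link that grants 10 units per wakeup:

#include <algorithm>
#include <cassert>

int main()
{
    int units_remaining = 16;   // size left on the message being moved
    int bw_remaining = 10;      // link bandwidth left this wakeup

    // first wakeup: spend all 10 units, 6 still to transfer
    int diff = units_remaining - bw_remaining;
    units_remaining = std::max(0, diff);    // 6
    bw_remaining = std::max(0, -diff);      // 0
    assert(units_remaining == 6 && bw_remaining == 0);

    // next wakeup: 10 fresh units finish the message with 4 left over
    bw_remaining = 10;
    diff = units_remaining - bw_remaining;
    units_remaining = std::max(0, diff);    // 0
    bw_remaining = std::max(0, -diff);      // 4
    assert(units_remaining == 0 && bw_remaining == 4);
    return 0;
}
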
void
assert(getLinkBandwidth() > 0);
int bw_remaining = getLinkBandwidth();
- // Give the highest numbered link priority most of the time
m_wakeups_wo_switch++;
- int highest_prio_vnet = m_vnets-1;
- int lowest_prio_vnet = 0;
- int counter = 1;
bool schedule_wakeup = false;
+ // direction in which to iterate over the vnets this wakeup
+ bool iteration_direction = false;
+
// invert priorities to avoid starvation seen in the component network
if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
m_wakeups_wo_switch = 0;
- highest_prio_vnet = 0;
- lowest_prio_vnet = m_vnets-1;
- counter = -1;
+ iteration_direction = true;
}
- for (int vnet = highest_prio_vnet;
- (vnet * counter) >= (counter * lowest_prio_vnet);
- vnet -= counter) {
-
- assert(m_out[vnet] != NULL);
- assert(m_in[vnet] != NULL);
- assert(m_units_remaining[vnet] >= 0);
-
- while (bw_remaining > 0 &&
- (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
- m_out[vnet]->areNSlotsAvailable(1)) {
-
- // See if we are done transferring the previous message on
- // this virtual network
- if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
- // Find the size of the message we are moving
- MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
- NetworkMessage* net_msg_ptr =
- safe_cast<NetworkMessage*>(msg_ptr.get());
- m_units_remaining[vnet] +=
- network_message_to_size(net_msg_ptr);
-
- DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
- "enqueueing net msg %d time: %lld.\n",
- m_node, getLinkBandwidth(), m_units_remaining[vnet],
- g_system_ptr->curCycle());
-
- // Move the message
- m_in[vnet]->dequeue();
- m_out[vnet]->enqueue(msg_ptr, m_link_latency);
-
- // Count the message
- m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
-
- DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]);
- }
-
- // Calculate the amount of bandwidth we spent on this message
- int diff = m_units_remaining[vnet] - bw_remaining;
- m_units_remaining[vnet] = max(0, diff);
- bw_remaining = max(0, -diff);
+ if (iteration_direction) {
+ for (auto& it : m_in) {
+ int vnet = it.first;
+ operateVnet(vnet, bw_remaining, schedule_wakeup,
+ it.second, m_out[vnet]);
}
-
- if (bw_remaining > 0 &&
- (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
- !m_out[vnet]->areNSlotsAvailable(1)) {
- DPRINTF(RubyNetwork, "vnet: %d", vnet);
- // schedule me to wakeup again because I'm waiting for my
- // output queue to become available
- schedule_wakeup = true;
+ } else {
+ for (auto it = m_in.rbegin(); it != m_in.rend(); ++it) {
+ int vnet = it->first;
+ operateVnet(vnet, bw_remaining, schedule_wakeup,
+ it->second, m_out[vnet]);
}
}
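
By default the reverse pass gives the highest-numbered vnet first claim on bandwidth, matching the old highest-priority-first scan; after PRIORITY_SWITCH_LIMIT wakeups the forward pass inverts that to avoid starving low vnets. A sketch of the two orders over a map (values illustrative):

#include <cassert>
#include <map>
#include <vector>

int main()
{
    std::map<int, char> m_in;
    m_in[0] = 'a'; m_in[1] = 'b'; m_in[2] = 'c';
    std::vector<int> order;

    bool iteration_direction = false;   // default: favor high vnets
    if (iteration_direction) {
        for (auto& it : m_in)                        // vnet 0 first
            order.push_back(it.first);
    } else {
        for (auto it = m_in.rbegin(); it != m_in.rend(); ++it)
            order.push_back(it->first);              // vnet 2 first
    }
    assert(order[0] == 2 && order[2] == 0);  // highest vnet served first
    return 0;
}
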
for (MessageSizeType type = MessageSizeType_FIRST;
type < MessageSizeType_NUM; ++type) {
m_msg_counts[(unsigned int)type]
- .init(m_vnets)
+ .init(Network::getNumberOfVirtualNetworks())
.name(parent + csprintf(".throttle%i", m_node) + ".msg_count." +
MessageSizeType_to_string(type))
.flags(Stats::nozero)
std::string name()
{ return csprintf("Throttle-%i", m_sID); }
- void addLinks(const std::vector<MessageBuffer*>& in_vec,
- const std::vector<MessageBuffer*>& out_vec);
+ void addLinks(const std::map<int, MessageBuffer*>& in_vec,
+ const std::map<int, MessageBuffer*>& out_vec);
void wakeup();
// The average utilization (a fraction) since last clearStats()
private:
void init(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
int endpoint_bandwidth);
- void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr);
+ void operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
+ MessageBuffer *in, MessageBuffer *out);
// Private copy constructor and assignment operator
Throttle(const Throttle& obj);
Throttle& operator=(const Throttle& obj);
- std::vector<MessageBuffer*> m_in;
- std::vector<MessageBuffer*> m_out;
- unsigned int m_vnets;
- std::vector<int> m_units_remaining;
+ std::map<int, MessageBuffer*> m_in;
+ std::map<int, MessageBuffer*> m_out;
+ std::map<int, int> m_units_remaining;
+
int m_sID;
NodeID m_node;
int m_link_bandwidth_multiplier;
m_delayVCHistogram[virtualNetwork]->sample(delay);
}
-void
-AbstractController::connectWithPeer(AbstractController *c)
-{
- getQueuesFromPeer(c);
- c->getQueuesFromPeer(this);
-}
-
void
AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
{
virtual void collateStats()
{fatal("collateStats() should be overridden!");}
+ //! Set the message buffer with the given name.
+ virtual void setNetQueue(const std::string& name, MessageBuffer *b) = 0;
+
public:
MachineID getMachineID() const { return m_machineID; }
Stats::Histogram& getDelayVCHist(uint32_t index)
{ return *(m_delayVCHistogram[index]); }
- MessageBuffer *getPeerQueue(uint32_t pid)
- {
- std::map<uint32_t, MessageBuffer *>::iterator it =
- peerQueueMap.find(pid);
- assert(it != peerQueueMap.end());
- return (*it).second;
- }
-
protected:
//! Profiles original cache requests including PUTs
void profileRequest(const std::string &request);
//! Profiles the delay associated with messages.
void profileMsgDelay(uint32_t virtualNetwork, Cycles delay);
- //! Function for connecting peer controllers
- void connectWithPeer(AbstractController *);
- virtual void getQueuesFromPeer(AbstractController *)
- { fatal("getQueuesFromPeer() should be called only if implemented!"); }
-
void stallBuffer(MessageBuffer* buf, Address addr);
void wakeUpBuffers(Address addr);
void wakeUpAllBuffers(Address addr);
unsigned int m_buffer_size;
Cycles m_recycle_latency;
- //! Map from physical network number to the Message Buffer.
- std::map<uint32_t, MessageBuffer*> peerQueueMap;
-
//! Counter for the number of cycles when the transitions carried out
//! were equal to the maximum allowed
Stats::Scalar m_fully_busy_cycles;
def __init__(self, symtab, ident, location, pairs, config_parameters):
super(StateMachine, self).__init__(symtab, ident, location, pairs)
self.table = None
+
+ # Data members in the State Machine that have been declared before
+ # the opening brace '{' of the machine. Note that these, along with
+ # the members in self.objects, form the entire set of data members.
self.config_parameters = config_parameters
+
self.prefetchers = []
for param in config_parameters:
self.transitions = []
self.in_ports = []
self.functions = []
+
+ # Data members in the State Machine that have been declared inside
+ # the machine's braces. Note that these, along with the config
+ # parameters, form the entire set of data members of the machine.
self.objects = []
self.TBEType = None
self.EntryType = None
if param.rvalue is not None:
dflt_str = str(param.rvalue.inline()) + ', '
- if python_class_map.has_key(param.type_ast.type.c_ident):
+ if param.type_ast.type.c_ident == "MessageBuffer":
+ if param["network"] == "To":
+ code('${{param.ident}} = MasterPort(${dflt_str}"")')
+ else:
+ code('${{param.ident}} = SlavePort(${dflt_str}"")')
+
+ elif python_class_map.has_key(param.type_ast.type.c_ident):
python_type = python_class_map[param.type_ast.type.c_ident]
code('${{param.ident}} = Param.${{python_type}}(${dflt_str}"")')
''')
seen_types = set()
- has_peer = False
for var in self.objects:
if var.type.ident not in seen_types and not var.type.isPrimitive:
code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
- if "network" in var and "physical_network" in var:
- has_peer = True
- seen_types.add(var.type.ident)
+ seen_types.add(var.type.ident)
# for adding information to the protocol debug trace
code('''
$c_ident(const Params *p);
static int getNumControllers();
void init();
+
MessageBuffer* getMandatoryQueue() const;
+ void setNetQueue(const std::string& name, MessageBuffer *b);
void print(std::ostream& out) const;
void wakeup();
if proto:
code('$proto')
- if has_peer:
- code('void getQueuesFromPeer(AbstractController *);')
if self.EntryType != None:
code('''
code = self.symtab.codeFormatter()
ident = self.ident
c_ident = "%s_Controller" % self.ident
- has_peer = False
code('''
/** \\file $c_ident.cc
# include a sequencer, connect it to the controller.
#
for param in self.config_parameters:
+
+ # Do not initialize message buffers, since they are initialized
+ # when the port-based connections are made.
+ if param.type_ast.type.c_ident == "MessageBuffer":
+ continue
+
if param.pointer:
code('m_${{param.ident}}_ptr = p->${{param.ident}};')
else:
code('m_${{param.ident}} = p->${{param.ident}};')
+
if re.compile("sequencer").search(param.ident):
code('m_${{param.ident}}_ptr->setController(this);')
code('''
m_${{var.ident}}_ptr = new ${{var.type.c_ident}}();
m_${{var.ident}}_ptr->setReceiver(this);
-''')
- else:
- if "network" in var and "physical_network" in var and \
- var["network"] == "To":
- has_peer = True
- code('''
-m_${{var.ident}}_ptr = new ${{var.type.c_ident}}();
-peerQueueMap[${{var["physical_network"]}}] = m_${{var.ident}}_ptr;
-m_${{var.ident}}_ptr->setSender(this);
''')
code('''
-if (p->peer != NULL)
- connectWithPeer(p->peer);
for (int state = 0; state < ${ident}_State_NUM; state++) {
for (int event = 0; event < ${ident}_Event_NUM; event++) {
}
void
-$c_ident::init()
+$c_ident::setNetQueue(const std::string& name, MessageBuffer *b)
{
- MachineType machine_type = string_to_MachineType("${{var.machine.ident}}");
+ MachineType machine_type = string_to_MachineType("${{self.ident}}");
int base M5_VAR_USED = MachineType_base_number(machine_type);
+''')
+ code.indent()
+
+ # Set for maintaining the (vnet, direction) pairs already seen for
+ # this machine. It is used to check that no two message buffers are
+ # mapped to the same vnet in the same direction.
+ vnet_dir_set = set()
+
+ for var in self.config_parameters:
+ if "network" in var:
+ vtype = var.type_ast.type
+ vid = "m_%s_ptr" % var.ident
+
+ code('''
+if ("${{var.ident}}" == name) {
+ $vid = b;
+ assert($vid != NULL);
+''')
+ code.indent()
+ # Network port object
+ network = var["network"]
+ ordered = var["ordered"]
+
+ if "virtual_network" in var:
+ vnet = var["virtual_network"]
+ vnet_type = var["vnet_type"]
+
+ assert (vnet, network) not in vnet_dir_set
+ vnet_dir_set.add((vnet, network))
+
+ code('''
+m_net_ptr->set${network}NetQueue(m_version + base, $ordered, $vnet,
+ "$vnet_type", b);
+''')
+ # Set which end of the buffer this controller is
+ if network == "To":
+ code('$vid->setSender(this);')
+ else:
+ code('$vid->setReceiver(this);')
+
+ # Set ordering
+ code('$vid->setOrdering(${{var["ordered"]}});')
+
+ # Set randomization
+ if "random" in var:
+ # A buffer
+ code('$vid->setRandomization(${{var["random"]}});')
+
+ # Set Priority
+ if "rank" in var:
+ code('$vid->setPriority(${{var["rank"]}});')
+
+ # Set buffer size
+ code('$vid->resize(m_buffer_size);')
+
+ if "recycle_latency" in var:
+ code('$vid->setRecycleLatency( ' \
+ 'Cycles(${{var["recycle_latency"]}}));')
+ else:
+ code('$vid->setRecycleLatency(m_recycle_latency);')
+
+ # Set description (may be overridden later by port def)
+ code('''
+$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.ident}}]");
+''')
+ code.dedent()
+ code('}\n')
+
+ code.dedent()
+ code('''
+}
+
+void
+$c_ident::init()
+{
// initialize objects
''')
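
For a hypothetical L1 controller with a buffer parameter requestFromCache bound to a "To" network port, the generated dispatch reduces to one if-block per buffer parameter that stores the pointer and wires the buffer in. A standalone mock of that shape, with all identifiers illustrative and the network/ordering calls elided:

#include <cassert>
#include <string>

// Stand-in for MessageBuffer; the real generated code also registers
// the buffer with the network and sets ordering, size, and latency.
struct MessageBuffer { std::string role; };

struct MockController {
    MessageBuffer *m_requestFromCache_ptr;

    MockController() : m_requestFromCache_ptr(0) {}

    // shape of the generated setNetQueue: one if-block per buffer param
    void setNetQueue(const std::string& name, MessageBuffer *b)
    {
        if ("requestFromCache" == name) {
            m_requestFromCache_ptr = b;
            m_requestFromCache_ptr->role = "sender";  // "To" network side
        }
    }
};

int main()
{
    MockController c;
    MessageBuffer b;
    c.setNetQueue("requestFromCache", &b);
    assert(c.m_requestFromCache_ptr == &b && b.role == "sender");
    return 0;
}
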
code.indent()
+
for var in self.objects:
vtype = var.type
vid = "m_%s_ptr" % var.ident
code('$vid->setSender(this);')
code('$vid->setReceiver(this);')
- else:
- # Network port object
- network = var["network"]
- ordered = var["ordered"]
-
- if "virtual_network" in var:
- vnet = var["virtual_network"]
- vnet_type = var["vnet_type"]
-
- assert var.machine is not None
- code('''
-$vid = m_net_ptr->get${network}NetQueue(m_version + base, $ordered, $vnet, "$vnet_type");
-assert($vid != NULL);
-''')
-
- # Set the end
- if network == "To":
- code('$vid->setSender(this);')
- else:
- code('$vid->setReceiver(this);')
-
- # Set ordering
- if "ordered" in var:
- # A buffer
- code('$vid->setOrdering(${{var["ordered"]}});')
-
- # Set randomization
- if "random" in var:
- # A buffer
- code('$vid->setRandomization(${{var["random"]}});')
-
- # Set Priority
- if "rank" in var:
- code('$vid->setPriority(${{var["rank"]}})')
-
- # Set buffer size
- if vtype.isBuffer:
- code('''
-if (m_buffer_size > 0) {
- $vid->resize(m_buffer_size);
-}
-''')
-
- # set description (may be overriden later by port def)
- code('''
-$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.ident}}]");
-
-''')
-
if vtype.isBuffer:
if "recycle_latency" in var:
code('$vid->setRecycleLatency( ' \
if vtype.isBuffer:
vid = "m_%s_ptr" % var.ident
code('if ($vid->functionalRead(pkt)) { return true; }')
+
+ for var in self.config_parameters:
+ vtype = var.type_ast.type
+ if vtype.isBuffer:
+ vid = "m_%s_ptr" % var.ident
+ code('if ($vid->functionalRead(pkt)) { return true; }')
+
code('''
return false;
}
if vtype.isBuffer:
vid = "m_%s_ptr" % var.ident
code('num_functional_writes += $vid->functionalWrite(pkt);')
+
+ for var in self.config_parameters:
+ vtype = var.type_ast.type
+ if vtype.isBuffer:
+ vid = "m_%s_ptr" % var.ident
+ code('num_functional_writes += $vid->functionalWrite(pkt);')
+
code('''
return num_functional_writes;
}
''')
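
Because port-connected buffers now live in config_parameters rather than self.objects, functional reads and writes must probe both collections; the emitted code is simply the two loops concatenated. A standalone sketch of the resulting probe order (types mocked, not gem5's):

#include <cassert>
#include <vector>

struct MockBuffer {
    bool has_data;
    bool functionalRead() const { return has_data; }
};

int main()
{
    // buffers declared inside the machine body ...
    std::vector<MockBuffer> objects(1);
    objects[0].has_data = false;
    // ... and buffers that arrived as port-connected config parameters
    std::vector<MockBuffer> config_parameters(1);
    config_parameters[0].has_data = true;

    bool hit = false;
    for (unsigned i = 0; i < objects.size(); i++)
        if (objects[i].functionalRead()) { hit = true; break; }
    for (unsigned i = 0; !hit && i < config_parameters.size(); i++)
        if (config_parameters[i].functionalRead()) { hit = true; break; }

    assert(hit);   // the second pass finds the data
    return 0;
}
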
- # Check if this controller has a peer, if yes then write the
- # function for connecting to the peer.
- if has_peer:
- code('''
-
-void
-$c_ident::getQueuesFromPeer(AbstractController *peer)
-{
-''')
- for var in self.objects:
- if "network" in var and "physical_network" in var and \
- var["network"] == "From":
- code('''
-m_${{var.ident}}_ptr = peer->getPeerQueue(${{var["physical_network"]}});
-assert(m_${{var.ident}}_ptr != NULL);
-m_${{var.ident}}_ptr->setReceiver(this);
-
-''')
- code('}')
-
code.write(path, "%s.cc" % c_ident)
def printCWakeup(self, path, includes):
#include "dev/etherdevice.hh"
#include "dev/etherobject.hh"
#endif
+#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/mem_object.hh"
#include "python/swig/pyobject.hh"
#include "sim/full_system.hh"
}
}
#endif
+
+ // These could be objects from the Ruby memory system. If so, at least
+ // one of them should be an AbstractController, so do a type check.
+ AbstractController *ac1, *ac2;
+ ac1 = dynamic_cast<AbstractController*>(o1);
+ ac2 = dynamic_cast<AbstractController*>(o2);
+
+ if (ac1 || ac2) {
+ MessageBuffer *b = new MessageBuffer();
+
+ // set the message buffer associated with the provided names
+ if (ac1) {
+ ac1->setNetQueue(name1, b);
+ }
+ if (ac2) {
+ ac2->setNetQueue(name2, b);
+ }
+
+ return 1;
+ }
+
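
connectPorts can be handed any pair of SimObjects, so it uses dynamic_cast to detect the Ruby-controller case before falling back to the classic MemObject path; when at least one side is an AbstractController, both sides are handed the same fresh MessageBuffer. A reduced sketch of that dispatch (types simplified, names illustrative):

#include <cassert>

struct SimObject { virtual ~SimObject() {} };
struct AbstractController : SimObject {};
struct MemObject : SimObject {};

// returns 1 when at least one endpoint is a Ruby controller
int connectLike(SimObject *o1, SimObject *o2)
{
    AbstractController *ac1 = dynamic_cast<AbstractController*>(o1);
    AbstractController *ac2 = dynamic_cast<AbstractController*>(o2);
    if (ac1 || ac2)
        return 1;   // Ruby path: share a new MessageBuffer
    return 0;       // fall through to the MemObject path
}

int main()
{
    AbstractController c;
    MemObject m;
    assert(connectLike(&c, &m) == 1);
    assert(connectLike(&m, &m) == 0);
    return 0;
}
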
MemObject *mo1, *mo2;
mo1 = dynamic_cast<MemObject*>(o1);
mo2 = dynamic_cast<MemObject*>(o2);