+++ /dev/null
-SS_COMPATIBLE_FP = 1
-CPU_MODELS = 'AtomicSimpleCPU,TimingSimpleCPU,O3CPU,MinorCPU'
-PROTOCOL = 'Network_test'
--- /dev/null
+TARGET_ISA = 'alpha'
+SS_COMPATIBLE_FP = 1
+CPU_MODELS = 'AtomicSimpleCPU,TimingSimpleCPU,O3CPU,MinorCPU'
+PROTOCOL = 'Garnet_standalone'
--- /dev/null
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# Copyright (c) 2016 Georgia Institute of Technology
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Brad Beckmann
+# Tushar Krishna
+
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from m5.util import addToPath
+from Ruby import create_topology
+
+#
+# Declare caches used by the protocol
+#
+class L1Cache(RubyCache): pass
+
+def define_options(parser):
+ return
+
+def create_system(options, full_system, system, dma_ports, ruby_system):
+ if buildEnv['PROTOCOL'] != 'Garnet_standalone':
+ panic("This script requires Garnet_standalone protocol to be built.")
+
+ cpu_sequencers = []
+
+ #
+ # The Garnet_standalone protocol does not support fs nor dma
+ #
+ assert(dma_ports == [])
+
+ #
+ # The ruby network creation expects the list of nodes in the system to be
+ # consistent with the NetDest list.
+ # Therefore the l1 controller nodes must be listed before
+    # the directory nodes and directory nodes before dma nodes, etc.
+    #
+ l1_cntrl_nodes = []
+ dir_cntrl_nodes = []
+
+ #
+ # Must create the individual controllers before the network to ensure the
+ # controller constructors are called before the network constructor
+ #
+
+ for i in xrange(options.num_cpus):
+ #
+ # First create the Ruby objects associated with this cpu
+ # Only one cache exists for this protocol, so by default use the L1D
+ # config parameters.
+ #
+ cache = L1Cache(size = options.l1d_size,
+ assoc = options.l1d_assoc)
+
+ #
+ # Only one unified L1 cache exists. Can cache instructions and data.
+ #
+ l1_cntrl = L1Cache_Controller(version = i,
+ cacheMemory = cache,
+ ruby_system = ruby_system)
+
+ cpu_seq = RubySequencer(icache = cache,
+ dcache = cache,
+ garnet_standalone = True,
+ ruby_system = ruby_system)
+
+ l1_cntrl.sequencer = cpu_seq
+ exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
+
+ # Add controllers and sequencers to the appropriate lists
+ cpu_sequencers.append(cpu_seq)
+ l1_cntrl_nodes.append(l1_cntrl)
+
+ # Connect the L1 controllers and the network
+ l1_cntrl.mandatoryQueue = MessageBuffer()
+ l1_cntrl.requestFromCache = MessageBuffer()
+ l1_cntrl.responseFromCache = MessageBuffer()
+ l1_cntrl.forwardFromCache = MessageBuffer()
+
+
+ phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
+ assert(phys_mem_size % options.num_dirs == 0)
+ mem_module_size = phys_mem_size / options.num_dirs
+
+ for i in xrange(options.num_dirs):
+ dir_size = MemorySize('0B')
+ dir_size.value = mem_module_size
+
+ dir_cntrl = Directory_Controller(version = i,
+ directory = \
+ RubyDirectoryMemory(version = i,
+ size = dir_size),
+ ruby_system = ruby_system)
+
+ exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
+ dir_cntrl_nodes.append(dir_cntrl)
+
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = MessageBuffer()
+ dir_cntrl.forwardToDir = MessageBuffer()
+ dir_cntrl.responseToDir = MessageBuffer()
+
+
+ all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes
+ ruby_system.network.number_of_virtual_networks = 3
+ topology = create_topology(all_cntrls, options)
+ return (cpu_sequencers, dir_cntrl_nodes, topology)
+++ /dev/null
-# Copyright (c) 2006-2007 The Regents of The University of Michigan
-# Copyright (c) 2009 Advanced Micro Devices, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met: redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer;
-# redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution;
-# neither the name of the copyright holders nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Authors: Brad Beckmann
-
-import m5
-from m5.objects import *
-from m5.defines import buildEnv
-from m5.util import addToPath
-from Ruby import create_topology
-
-#
-# Declare caches used by the protocol
-#
-class L1Cache(RubyCache): pass
-
-def define_options(parser):
- return
-
-def create_system(options, full_system, system, dma_ports, ruby_system):
-
- if buildEnv['PROTOCOL'] != 'Network_test':
- panic("This script requires the Network_test protocol to be built.")
-
- cpu_sequencers = []
-
- #
- # The Garnet tester protocol does not support fs nor dma
- #
- assert(dma_ports == [])
-
- #
- # The ruby network creation expects the list of nodes in the system to be
- # consistent with the NetDest list. Therefore the l1 controller nodes must be
- # listed before the directory nodes and directory nodes before dma nodes, etc.
- #
- l1_cntrl_nodes = []
- dir_cntrl_nodes = []
-
- #
- # Must create the individual controllers before the network to ensure the
- # controller constructors are called before the network constructor
- #
-
- for i in xrange(options.num_cpus):
- #
- # First create the Ruby objects associated with this cpu
- # Only one cache exists for this protocol, so by default use the L1D
- # config parameters.
- #
- cache = L1Cache(size = options.l1d_size,
- assoc = options.l1d_assoc)
-
- #
- # Only one unified L1 cache exists. Can cache instructions and data.
- #
- l1_cntrl = L1Cache_Controller(version = i,
- cacheMemory = cache,
- ruby_system = ruby_system)
-
- cpu_seq = RubySequencer(icache = cache,
- dcache = cache,
- using_network_tester = True,
- ruby_system = ruby_system)
-
- l1_cntrl.sequencer = cpu_seq
- exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
-
- # Add controllers and sequencers to the appropriate lists
- cpu_sequencers.append(cpu_seq)
- l1_cntrl_nodes.append(l1_cntrl)
-
- # Connect the L1 controllers and the network
- l1_cntrl.mandatoryQueue = MessageBuffer()
- l1_cntrl.requestFromCache = MessageBuffer()
- l1_cntrl.responseFromCache = MessageBuffer()
- l1_cntrl.forwardFromCache = MessageBuffer()
-
-
- phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
- assert(phys_mem_size % options.num_dirs == 0)
- mem_module_size = phys_mem_size / options.num_dirs
-
- for i in xrange(options.num_dirs):
- dir_size = MemorySize('0B')
- dir_size.value = mem_module_size
-
- dir_cntrl = Directory_Controller(version = i,
- directory = \
- RubyDirectoryMemory(version = i,
- size = dir_size),
- ruby_system = ruby_system)
-
- exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
- dir_cntrl_nodes.append(dir_cntrl)
-
- # Connect the directory controllers and the network
- dir_cntrl.requestToDir = MessageBuffer()
- dir_cntrl.forwardToDir = MessageBuffer()
- dir_cntrl.responseToDir = MessageBuffer()
-
-
- all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes
- ruby_system.network.number_of_virtual_networks = 3
- topology = create_topology(all_cntrls, options)
- return (cpu_sequencers, dir_cntrl_nodes, topology)
--- /dev/null
+/*
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Authors: Brad Beckmann
+ * Tushar Krishna
+ */
+
+
+machine(MachineType:L1Cache, "Garnet_standalone L1 Cache")
+ : Sequencer * sequencer;
+ Cycles issue_latency := 2;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="0",
+ vnet_type = "request";
+ MessageBuffer * forwardFromCache, network="To", virtual_network="1",
+ vnet_type = "forward";
+ MessageBuffer * responseFromCache, network="To", virtual_network="2",
+ vnet_type = "response";
+
+ MessageBuffer * mandatoryQueue;
+{
+ // STATES
+ state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
+ I, AccessPermission:Invalid, desc="Not Present/Invalid";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ // From processor
+ Request, desc="Request from Garnet_standalone";
+ Forward, desc="Forward from Garnet_standalone";
+ Response, desc="Response from Garnet_standalone";
+ }
+
+ // STRUCTURE DEFINITIONS
+ DataBlock dummyData;
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="Data in the block";
+ }
+
+ // FUNCTIONS
+ Tick clockEdge();
+
+  // cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
+  // generates packets of the type ReadReq, INST_FETCH, and WriteReq.
+ // These are converted to LD, IFETCH and ST by mem/ruby/system/RubyPort.cc.
+ // These are then sent to the sequencer, which sends them here.
+ // Garnet_standalone-cache.sm tags LD, IFETCH and ST as Request, Forward,
+ // and Response Events respectively, which are then injected into
+ // virtual networks 0, 1 and 2 respectively.
+ // This models traffic of different types within the network.
+ //
+ // Note that requests and forwards are MessageSizeType:Control,
+ // while responses are MessageSizeType:Data.
+ //
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:Request;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:Forward;
+ } else if (type == RubyRequestType:ST) {
+ return Event:Response;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+
+ State getState(Entry cache_entry, Addr addr) {
+ return State:I;
+ }
+
+ void setState(Entry cache_entry, Addr addr, State state) {
+
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ }
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ return OOD;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("Garnet_standalone does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("Garnet_standalone does not support functional write.");
+ }
+
+ // NETWORK PORTS
+
+ out_port(requestNetwork_out, RequestMsg, requestFromCache);
+ out_port(forwardNetwork_out, RequestMsg, forwardFromCache);
+ out_port(responseNetwork_out, RequestMsg, responseFromCache);
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest) {
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
+ }
+ }
+ }
+
+ // ACTIONS
+
+ // The destination directory of the packets is embedded in the address
+ // map_Address_to_Directory is used to retrieve it.
+
+ action(a_issueRequest, "a", desc="Issue a request") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:MSG;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+
+ // To send broadcasts in vnet0 (to emulate broadcast-based protocols),
+ // replace the above line by the following:
+ // out_msg.Destination := broadcast(MachineType:Directory);
+
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(b_issueForward, "b", desc="Issue a forward") {
+ enqueue(forwardNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:MSG;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(c_issueResponse, "c", desc="Issue a response") {
+ enqueue(responseNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:MSG;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
+ sequencer.readCallback(address, dummyData);
+ }
+
+ action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
+ sequencer.writeCallback(address, dummyData);
+ }
+
+
+ // TRANSITIONS
+
+ // sequencer hit call back is performed after injecting the packets.
+ // The goal of the Garnet_standalone protocol is only to inject packets into
+ // the network, not to keep track of them via TBEs.
+
+ transition(I, Response) {
+ s_store_hit;
+ c_issueResponse;
+ m_popMandatoryQueue;
+ }
+
+ transition(I, Request) {
+ r_load_hit;
+ a_issueRequest;
+ m_popMandatoryQueue;
+ }
+ transition(I, Forward) {
+ r_load_hit;
+ b_issueForward;
+ m_popMandatoryQueue;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Authors: Brad Beckmann
+ * Tushar Krishna
+ */
+
+
+machine(MachineType:Directory, "Garnet_standalone Directory")
+ : MessageBuffer * requestToDir, network="From", virtual_network="0",
+ vnet_type = "request";
+ MessageBuffer * forwardToDir, network="From", virtual_network="1",
+ vnet_type = "forward";
+ MessageBuffer * responseToDir, network="From", virtual_network="2",
+ vnet_type = "response";
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, AccessPermission:Invalid, desc="Invalid";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ // processor requests
+ Receive_Request, desc="Receive Message";
+ Receive_Forward, desc="Receive Message";
+ Receive_Response, desc="Receive Message";
+ }
+
+ // TYPES
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // ** FUNCTIONS **
+ Tick clockEdge();
+
+ State getState(Addr addr) {
+ return State:I;
+ }
+
+ void setState(Addr addr, State state) {
+
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("Garnet_standalone does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("Garnet_standalone does not support functional write.");
+ }
+
+ // ** IN_PORTS **
+
+ in_port(requestQueue_in, RequestMsg, requestToDir) {
+ if (requestQueue_in.isReady(clockEdge())) {
+ peek(requestQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:MSG) {
+ trigger(Event:Receive_Request, in_msg.addr);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+ in_port(forwardQueue_in, RequestMsg, forwardToDir) {
+ if (forwardQueue_in.isReady(clockEdge())) {
+ peek(forwardQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:MSG) {
+ trigger(Event:Receive_Forward, in_msg.addr);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+ in_port(responseQueue_in, RequestMsg, responseToDir) {
+ if (responseQueue_in.isReady(clockEdge())) {
+ peek(responseQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:MSG) {
+ trigger(Event:Receive_Response, in_msg.addr);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ requestQueue_in.dequeue(clockEdge());
+ }
+
+ action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
+ forwardQueue_in.dequeue(clockEdge());
+ }
+
+ action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
+ responseQueue_in.dequeue(clockEdge());
+ }
+
+ // TRANSITIONS
+
+ // The directory simply drops the received packets.
+ // The goal of Garnet_standalone is only to track network stats.
+
+ transition(I, Receive_Request) {
+ i_popIncomingRequestQueue;
+ }
+ transition(I, Receive_Forward) {
+ f_popIncomingForwardQueue;
+ }
+ transition(I, Receive_Response) {
+ r_popIncomingResponseQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ MSG, desc="Message";
+}
+
+// RequestMsg (and also forwarded requests)
+structure(RequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Multicast destination mask";
+ DataBlock DataBlk, desc="data for the cache line";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ error("Garnet_standalone does not support functional accesses!");
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ error("Garnet_standalone does not support functional accesses!");
+ }
+}
--- /dev/null
+protocol "Garnet_standalone";
+include "RubySlicc_interfaces.slicc";
+include "Garnet_standalone-msg.sm";
+include "Garnet_standalone-cache.sm";
+include "Garnet_standalone-dir.sm";
+++ /dev/null
-/*
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Authors: Brad Beckmann
- * Tushar Krishna
- */
-
-
-machine(MachineType:L1Cache, "Network_test L1 Cache")
- : Sequencer * sequencer;
- Cycles issue_latency := 2;
-
- // NETWORK BUFFERS
- MessageBuffer * requestFromCache, network="To", virtual_network="0",
- vnet_type = "request";
- MessageBuffer * forwardFromCache, network="To", virtual_network="1",
- vnet_type = "forward";
- MessageBuffer * responseFromCache, network="To", virtual_network="2",
- vnet_type = "response";
-
- MessageBuffer * mandatoryQueue;
-{
- // STATES
- state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
- I, AccessPermission:Invalid, desc="Not Present/Invalid";
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- // From processor
- Request, desc="Request from Network_test";
- Forward, desc="Forward from Network_test";
- Response, desc="Response from Network_test";
- }
-
- // STRUCTURE DEFINITIONS
- DataBlock dummyData;
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- DataBlock DataBlk, desc="Data in the block";
- }
-
- // FUNCTIONS
- Tick clockEdge();
-
- // cpu/testers/networktest/networktest.cc generates packets of the type
- // ReadReq, INST_FETCH, and WriteReq.
- // These are converted to LD, IFETCH and ST by mem/ruby/system/RubyPort.cc.
- // These are then sent to the sequencer, which sends them here.
- // Network_test-cache.sm tags LD, IFETCH and ST as Request, Forward,
- // and Response Events respectively, which are then injected into
- // virtual networks 0, 1 and 2 respectively.
- // This models traffic of different types within the network.
- //
- // Note that requests and forwards are MessageSizeType:Control,
- // while responses are MessageSizeType:Data.
- //
- Event mandatory_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:Request;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:Forward;
- } else if (type == RubyRequestType:ST) {
- return Event:Response;
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
-
- State getState(Entry cache_entry, Addr addr) {
- return State:I;
- }
-
- void setState(Entry cache_entry, Addr addr, State state) {
-
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- }
-
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- return OOD;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("Network test does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("Network test does not support functional write.");
- }
-
- // NETWORK PORTS
-
- out_port(requestNetwork_out, RequestMsg, requestFromCache);
- out_port(forwardNetwork_out, RequestMsg, forwardFromCache);
- out_port(responseNetwork_out, RequestMsg, responseFromCache);
-
- // Mandatory Queue
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest) {
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
- }
- }
- }
-
- // ACTIONS
-
- // The destination directory of the packets is embedded in the address
- // map_Address_to_Directory is used to retrieve it.
-
- action(a_issueRequest, "a", desc="Issue a request") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:MSG;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- //out_msg.Destination := broadcast(MachineType:Directory);
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
-
- action(b_issueForward, "b", desc="Issue a forward") {
- enqueue(forwardNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:MSG;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
-
- action(c_issueResponse, "c", desc="Issue a response") {
- enqueue(responseNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:MSG;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.MessageSize := MessageSizeType:Data;
- }
- }
-
- action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
- sequencer.readCallback(address, dummyData);
- }
-
- action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
- sequencer.writeCallback(address, dummyData);
- }
-
-
- // TRANSITIONS
-
- // sequencer hit call back is performed after injecting the packets.
- // The goal of the Network_test protocol is only to inject packets into
- // the network, not to keep track of them via TBEs.
-
- transition(I, Response) {
- s_store_hit;
- c_issueResponse;
- m_popMandatoryQueue;
- }
-
- transition(I, Request) {
- r_load_hit;
- a_issueRequest;
- m_popMandatoryQueue;
- }
- transition(I, Forward) {
- r_load_hit;
- b_issueForward;
- m_popMandatoryQueue;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Authors: Brad Beckmann
- * Tushar Krishna
- */
-
-
-machine(MachineType:Directory, "Network_test Directory")
- : MessageBuffer * requestToDir, network="From", virtual_network="0",
- vnet_type = "request";
- MessageBuffer * forwardToDir, network="From", virtual_network="1",
- vnet_type = "forward";
- MessageBuffer * responseToDir, network="From", virtual_network="2",
- vnet_type = "response";
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, AccessPermission:Invalid, desc="Invalid";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- // processor requests
- Receive_Request, desc="Receive Message";
- Receive_Forward, desc="Receive Message";
- Receive_Response, desc="Receive Message";
- }
-
- // TYPES
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- DataBlock DataBlk, desc="data for the block";
- }
-
- // ** FUNCTIONS **
- Tick clockEdge();
-
- State getState(Addr addr) {
- return State:I;
- }
-
- void setState(Addr addr, State state) {
-
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("Network test does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("Network test does not support functional write.");
- }
-
- // ** IN_PORTS **
-
- in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady(clockEdge())) {
- peek(requestQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:MSG) {
- trigger(Event:Receive_Request, in_msg.addr);
- } else {
- error("Invalid message");
- }
- }
- }
- }
- in_port(forwardQueue_in, RequestMsg, forwardToDir) {
- if (forwardQueue_in.isReady(clockEdge())) {
- peek(forwardQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:MSG) {
- trigger(Event:Receive_Forward, in_msg.addr);
- } else {
- error("Invalid message");
- }
- }
- }
- }
- in_port(responseQueue_in, RequestMsg, responseToDir) {
- if (responseQueue_in.isReady(clockEdge())) {
- peek(responseQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:MSG) {
- trigger(Event:Receive_Response, in_msg.addr);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
-
- action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue(clockEdge());
- }
-
- action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
- forwardQueue_in.dequeue(clockEdge());
- }
-
- action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
- responseQueue_in.dequeue(clockEdge());
- }
-
- // TRANSITIONS
-
- // The directory simply drops the received packets.
- // The goal of Network_test is only to track network stats.
-
- transition(I, Receive_Request) {
- i_popIncomingRequestQueue;
- }
- transition(I, Receive_Forward) {
- f_popIncomingForwardQueue;
- }
- transition(I, Receive_Response) {
- r_popIncomingResponseQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- MSG, desc="Message";
-}
-
-// RequestMsg (and also forwarded requests)
-structure(RequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Multicast destination mask";
- DataBlock DataBlk, desc="data for the cache line";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- error("Network test does not support functional accesses!");
- }
-
- bool functionalWrite(Packet *pkt) {
- error("Network test does not support functional accesses!");
- }
-}
+++ /dev/null
-protocol "Network_test";
-include "RubySlicc_interfaces.slicc";
-include "Network_test-msg.sm";
-include "Network_test-cache.sm";
-include "Network_test-dir.sm";
'MOESI_CMP_directory',
'MOESI_CMP_token',
'MOESI_hammer',
- 'Network_test',
+ 'Garnet_standalone',
'None'
])
m_data_cache_hit_latency = p->dcache_hit_latency;
- m_usingNetworkTester = p->using_network_tester;
+ m_runningGarnetStandalone = p->garnet_standalone;
assumingRfOCoherence = p->assume_rfo;
}
// For Alpha, properly handle LL, SC, and write requests with respect to
// locked cache blocks.
//
- // Not valid for Network_test protocl
+ // Not valid for Garnet_standalone protocol
//
bool success = true;
- if (!m_usingNetworkTester)
+ if (!m_runningGarnetStandalone)
success = handleLlsc(address, request);
if (request->m_type == RubyRequestType_Locked_RMW_Read) {
int m_load_waiting_on_store_cycles;
int m_load_waiting_on_load_cycles;
- bool m_usingNetworkTester;
+ bool m_runningGarnetStandalone;
class GPUCoalescerWakeupEvent : public Event
{
assert(m_data_cache_hit_latency > 0);
assert(m_inst_cache_hit_latency > 0);
- m_usingNetworkTester = p->using_network_tester;
+ m_runningGarnetStandalone = p->garnet_standalone;
}
Sequencer::~Sequencer()
// For Alpha, properly handle LL, SC, and write requests with respect to
// locked cache blocks.
//
- // Not valid for Network_test protocl
+ // Not valid for Garnet_standalone protocol
//
bool success = true;
- if (!m_usingNetworkTester)
+ if (!m_runningGarnetStandalone)
success = handleLlsc(address, request);
// Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
int m_coreId;
- bool m_usingNetworkTester;
+ bool m_runningGarnetStandalone;
//! Histogram for number of outstanding requests per cycle.
Stats::Histogram m_outstandReqHist;
"max requests (incl. prefetches) outstanding")
deadlock_threshold = Param.Cycles(500000,
"max outstanding cycles for a request before deadlock/livelock declared")
- using_network_tester = Param.Bool(False, "")
+ garnet_standalone = Param.Bool(False,
+ "Set when running the Garnet standalone (network-only) protocol")
# id used by protocols that support multiple sequencers per controller
# 99 is the dummy default value
coreid = Param.Int(99, "CorePair core id")