--- /dev/null
+//Default parameters, taken from /athitos/export/08spr_ee382a/sanchezd/runs/gen-scripts/ruby.defaults
+
+//General config
+g_DEADLOCK_THRESHOLD: 20000000
+RANDOMIZATION: false
+g_tester_length: 0
+SIMICS_RUBY_MULTIPLIER: 1
+OPAL_RUBY_MULTIPLIER: 1
+TRANSACTION_TRACE_ENABLED: false
+USER_MODE_DATA_ONLY: false
+PROFILE_HOT_LINES: false
+PROFILE_ALL_INSTRUCTIONS: false
+PRINT_INSTRUCTION_TRACE: false
+g_DEBUG_CYCLE: 0
+PERFECT_MEMORY_SYSTEM: false
+PERFECT_MEMORY_SYSTEM_LATENCY: 0
+DATA_BLOCK: false
+
+// Line, page sizes
+g_DATA_BLOCK_BYTES: 64
+g_PAGE_SIZE_BYTES: 8192
+
+
+g_REPLACEMENT_POLICY: PSEDUO_LRU
+// Replacement policy for all caches; the misspelled value above (sic) is the spelling Ruby's config expects
+
+// L1 config
+// 32KB, 4-way SA
+L1_CACHE_ASSOC: 4
+L1_CACHE_NUM_SETS_BITS: 7
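+// (2^7 = 128 sets x 4 ways x 64-byte lines = 32 KB)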
+// Single-cycle latency, hits take fastpath
+SEQUENCER_TO_CONTROLLER_LATENCY: 1
+REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH: false
+// L1->L2 delays
+L1_REQUEST_LATENCY: 1
+L1_RESPONSE_LATENCY: 1
+
+// L2 parameters
+// 4 MB, 16-way SA
+L2_CACHE_ASSOC: 16
+L2_CACHE_NUM_SETS_BITS: 12
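+// (2^12 = 4096 sets x 16 ways x 64-byte lines = 4 MB total)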
+MAP_L2BANKS_TO_LOWEST_BITS: false
+// Bank latencies
+L2_RESPONSE_LATENCY: 10
+L2_TAG_LATENCY: 5
+
+
+// Directory latencies
+// DIRECTORY_CACHE_LATENCY is the one that matters, since directories are modeled as perfect
+DIRECTORY_CACHE_LATENCY: 6
+// DIRECTORY_LATENCY should not be used, but set it just in case
+DIRECTORY_LATENCY: 6
+
+// Simple network parameters
+// external links
+NETWORK_LINK_LATENCY: 1
+// intra-chip links
+ON_CHIP_LINK_LATENCY: 1
+
+// General latencies
+RECYCLE_LATENCY: 1
+//Used in MessageBuffer, also MSI_MOSI_CMP dir controller
+
+
+// Unused parameters; set them to obviously bogus values so any accidental use stands out
+NULL_LATENCY: 100000
+// Only SMP and token CMP protocols
+ISSUE_LATENCY: 100000
+// Only SMP, example protocols
+CACHE_RESPONSE_LATENCY: 100000
+// Only SMP protocols
+COPY_HEAD_LATENCY: 100000
+// In no protocols or ruby code
+L2_RECYCLE_LATENCY: 100000
+// In no protocols or ruby code
+TIMER_LATENCY: 100000
+// Not used
+TBE_RESPONSE_LATENCY: 100000
+// Not used
+PERIODIC_TIMER_WAKEUPS: false
+// Not used
+BLOCK_STC: false
+// Not used
+SINGLE_ACCESS_L2_BANKS: false
+// Not used
+
+// Main memory latency
+MEMORY_RESPONSE_LATENCY_MINUS_2: 448 //not used in _m, see below
+
+PROFILE_EXCEPTIONS: false
+PROFILE_XACT: false
+PROFILE_NONXACT: true
+XACT_DEBUG: false
+XACT_DEBUG_LEVEL: 1
+XACT_MEMORY: false
+XACT_ENABLE_TOURMALINE: false
+XACT_NUM_CURRENT: 0
+XACT_LAST_UPDATE: 0
+XACT_ISOLATION_CHECK: false
+PERFECT_FILTER: true
+READ_WRITE_FILTER: Perfect_
+PERFECT_VIRTUAL_FILTER: true
+VIRTUAL_READ_WRITE_FILTER: Perfect_
+PERFECT_SUMMARY_FILTER: true
+SUMMARY_READ_WRITE_FILTER: Perfect_
+XACT_EAGER_CD: true
+XACT_LAZY_VM: false
+XACT_CONFLICT_RES: BASE
+XACT_COMMIT_TOKEN_LATENCY: 0
+XACT_NO_BACKOFF: false
+XACT_LOG_BUFFER_SIZE: 0
+XACT_STORE_PREDICTOR_HISTORY: 0
+XACT_STORE_PREDICTOR_ENTRIES: 0
+XACT_STORE_PREDICTOR_THRESHOLD: 0
+XACT_FIRST_ACCESS_COST: 0
+XACT_FIRST_PAGE_ACCESS_COST: 0
+ENABLE_MAGIC_WAITING: false
+ENABLE_WATCHPOINT: false
+XACT_ENABLE_VIRTUALIZATION_LOGTM_SE: false
+ATMTP_ENABLED: false
+ATMTP_ABORT_ON_NON_XACT_INST: false
+ATMTP_ALLOW_SAVE_RESTORE_IN_XACT: false
+ATMTP_XACT_MAX_STORES: 0
+ATMTP_DEBUG_LEVEL: 0
+XACT_LENGTH: 0
+XACT_SIZE: 0
+ABORT_RETRY_TIME: 0
+
+
+// Allowed parallelism in controllers
+L1CACHE_TRANSITIONS_PER_RUBY_CYCLE: 32
+L2CACHE_TRANSITIONS_PER_RUBY_CYCLE: 1000
+DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE: 1000
+g_SEQUENCER_OUTSTANDING_REQUESTS: 16
+
+//TBEs == MSHRs (global)
+NUMBER_OF_TBES: 128
+NUMBER_OF_L1_TBES: 32
+// unused in CMP protocols
+NUMBER_OF_L2_TBES: 32
+// unused in CMP protocols
+
+
+// TSO & WBuffer params (unused)
+FINITE_BUFFERING: false
+FINITE_BUFFER_SIZE: 3
+PROCESSOR_BUFFER_SIZE: 10
+PROTOCOL_BUFFER_SIZE: 32
+TSO: false
+
+// General network params
+g_endpoint_bandwidth: 10000
+g_adaptive_routing: true
+NUMBER_OF_VIRTUAL_NETWORKS: 5
+FAN_OUT_DEGREE: 4
+// for HIERARCHICAL_SWITCH
+
+
+// Detailed Memory Controller Params (only used in _m protocols)
+MEM_BUS_CYCLE_MULTIPLIER: 5
+BANKS_PER_RANK: 8
+RANKS_PER_DIMM: 2
+DIMMS_PER_CHANNEL: 2
+BANK_BIT_0: 8
+RANK_BIT_0: 11
+DIMM_BIT_0: 12
+
+BANK_QUEUE_SIZE: 12
+BANK_BUSY_TIME: 22
+RANK_RANK_DELAY: 2
+READ_WRITE_DELAY: 3
+BASIC_BUS_BUSY_TIME: 3
+MEM_CTL_LATENCY: 20
+REFRESH_PERIOD: 3120
+TFAW: 0
+//flip a coin to delay requests by one cycle, introduces non-determinism
+MEM_RANDOM_ARBITRATE: 50
+MEM_FIXED_DELAY: 0
+
+
+//Configuration-specific parameters
+g_NUM_PROCESSORS: 1
+g_NUM_CHIPS: 1
+g_PROCS_PER_CHIP: 1
+g_NUM_L2_BANKS: 1
+g_NUM_MEMORIES: 4
+g_PRINT_TOPOLOGY: true
+g_GARNET_NETWORK: true
+g_DETAIL_NETWORK: true
+g_FLIT_SIZE: 8
--- /dev/null
+# Copyright (c) 2006-2008 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+
+# Simple test script
+#
+# "m5 test.py"
+
+import m5
+
+if m5.build_env['FULL_SYSTEM']:
+ m5.panic("This script requires syscall emulation mode (*_SE).")
+
+from m5.objects import *
+import os, optparse, sys
+from os.path import join as joinpath
+m5.AddToPath('../common')
+import Simulation
+#from Caches import *
+from cpu2000 import *
+
+# Get paths we might need. It's expected this file is in m5/configs/example.
+config_path = os.path.dirname(os.path.abspath(__file__))
+config_root = os.path.dirname(config_path)
+m5_root = os.path.dirname(config_root)
+
+parser = optparse.OptionParser()
+
+# Benchmark options
+parser.add_option("-c", "--cmd",
+ default=joinpath(m5_root, "tests/test-progs/hello/bin/alpha/linux/hello"),
+ help="The binary to run in syscall emulation mode.")
+parser.add_option("-o", "--options", default="",
+ help='The options to pass to the binary, use " " around the entire string')
+parser.add_option("-i", "--input", default="", help="Read stdin from a file.")
+parser.add_option("--output", default="", help="Redirect stdout to a file.")
+parser.add_option("--errout", default="", help="Redirect stderr to a file.")
+parser.add_option("--ruby-debug", action="store_true")
+parser.add_option("--ruby-debug-file", default="", help="Ruby debug out file (stdout if blank)")
+
+execfile(os.path.join(config_root, "common", "Options.py"))
+
+(options, args) = parser.parse_args()
+
+if args:
+ print "Error: script doesn't take any positional arguments"
+ sys.exit(1)
+
+if options.bench:
+ try:
+ if m5.build_env['TARGET_ISA'] != 'alpha':
+ print >>sys.stderr, "Simpoints code only works for Alpha ISA at this time"
+ sys.exit(1)
+ exec("workload = %s('alpha', 'tru64', 'ref')" % options.bench)
+ process = workload.makeLiveProcess()
+ except:
+ print >>sys.stderr, "Unable to find workload for %s" % options.bench
+ sys.exit(1)
+else:
+ process = LiveProcess()
+ process.executable = options.cmd
+ process.cmd = [options.cmd] + options.options.split()
+
+
+if options.input != "":
+ process.input = options.input
+if options.output != "":
+ process.output = options.output
+if options.errout != "":
+ process.errout = options.errout
+
+if options.detailed:
+ #check for SMT workload
+ workloads = options.cmd.split(';')
+ if len(workloads) > 1:
+ process = []
+ smt_idx = 0
+ inputs = []
+ outputs = []
+ errouts = []
+
+ if options.input != "":
+ inputs = options.input.split(';')
+ if options.output != "":
+ outputs = options.output.split(';')
+ if options.errout != "":
+ errouts = options.errout.split(';')
+
+ for wrkld in workloads:
+ smt_process = LiveProcess()
+ smt_process.executable = wrkld
+ smt_process.cmd = wrkld + " " + options.options
+ if inputs and inputs[smt_idx]:
+ smt_process.input = inputs[smt_idx]
+ if outputs and outputs[smt_idx]:
+ smt_process.output = outputs[smt_idx]
+ if errouts and errouts[smt_idx]:
+ smt_process.errout = errouts[smt_idx]
+ process += [smt_process, ]
+ smt_idx += 1
+
+(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
+
+CPUClass.clock = '1GHz'
+
+np = options.num_cpus
+
+rubymem = RubyMemory(
+ range = AddrRange("512MB"),
+ clock = "1GHz",
+ num_cpus = np,
+ libruby_file = "src/mem/ruby/amd64-linux/generated/MOESI_CMP_directory/bin/libruby.so",
+ config_file = "ruby.config",
+ stats_file = "m5out/ruby.stats"
+)
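+# Note: libruby_file points at the library generated for one particular protocol build
+# (MOESI_CMP_directory here); adjust these paths to match your own build and setup.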
+
+if options.ruby_debug:
+ rubymem.debug = True
+ rubymem.debug_file = options.ruby_debug_file
+
+system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
+ physmem = rubymem)
+
+if options.l2cache:
+ print "Error: -l2cache incompatible with ruby, must configure it ruby-style"
+ sys.exit(1)
+
+if options.caches:
+ print "Error: -caches incompatible with ruby, must configure it ruby-style"
+ sys.exit(1)
+
+for i in xrange(np):
+ system.cpu[i].connectMemPorts(system.physmem)
+
+
+    # process = LiveProcess()
+    # process.executable = options.cmd
+    # process.cmd = [options.cmd, str(i)]
+ system.cpu[i].workload = process
+
+ if options.fastmem:
+ system.cpu[i].physmem_port = system.physmem.port
+
+
+root = Root(system = system)
+
+Simulation.run(options, root, system, FutureClass)
--- /dev/null
+# Copyright (c) 2005-2008 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+from m5.params import *
+from m5.proxy import *
+
+from PhysicalMemory import PhysicalMemory
+
+class RubyMemory(PhysicalMemory):
+ type = 'RubyMemory'
+ clock = Param.Clock('1t', "ruby clock speed")
+ phase = Param.Latency('0ns', "ruby clock phase")
+ config_file = Param.String("", "path to the Ruby config file")
+ config_options = Param.String("", "extra Ruby options (one per line)")
+ stats_file = Param.String("ruby.stats",
+ "file to which ruby dumps its stats")
+ num_cpus = Param.Int(1, "Number of CPUs connected to the Ruby memory")
+ debug = Param.Bool(False, "Use ruby debug")
+ debug_file = Param.String("",
+ "path to the Ruby debug output file (stdout if blank)")
+
SimObject('Bridge.py')
SimObject('Bus.py')
-SimObject('PhysicalMemory.py')
SimObject('MemObject.py')
+SimObject('PhysicalMemory.py')
+SimObject('RubyMemory.py')
Source('bridge.cc')
Source('bus.cc')
Source('port.cc')
Source('tport.cc')
Source('mport.cc')
+Source('rubymem.cc')
if env['FULL_SYSTEM']:
Source('vport.cc')
* @param initializingString A string (with value pairs) for initialization
* @param allocate_f A ptr to the allocate function
* @param generate_values A ptr to the generate values function
- * @param my_get_attr A ptr to the get attribute function
- * @param my_set_attr A ptr to the set attribute function
*/
initvar_t( const char *name, const char *relativeIncludePath,
const char *initializingString,
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/protocol/CacheRequestType.hh"
+#include "mem/packet.hh"
class RubySystem;
class SubBlock;
// Public Methods
virtual void get_network_config() {}
- virtual void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) = 0; // Called by sequencer
- virtual void conflictCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) { assert(0); } // Called by sequencer
+ virtual void hitCallback(Packet* pkt) = 0;
virtual integer_t getInstructionCount(int procID) const { return 1; }
virtual integer_t getCycleCount(int procID) const { return 1; }
- virtual SimicsHypervisor * getHypervisor() { return NULL; }
- virtual void notifySendNack( int procID, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id) { assert(0); }; //Called by Sequencer
- virtual void notifyReceiveNack( int procID, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id) { assert(0); }; //Called by Sequencer
- virtual void notifyReceiveNackFinal( int procID, const Address & addr) { assert(0); }; // Called by Sequencer
- virtual void notifyTrapStart( int procID, const Address & handlerPC, int threadID, int smtThread ) { assert(0); } //called by Sequencer
- virtual void notifyTrapComplete( int procID, const Address & newPC, int smtThread ) {assert(0); } // called by Sequencer
- virtual int getOpalTransactionLevel(int procID, int thread) const {
- cout << "mem/ruby/common/Driver.hh getOpalTransactionLevel() " << endl;
- return 0; } //called by Sequencer
- virtual void addThreadDependency(int procID, int requestor_thread, int conflict_thread) const { assert(0);}
- virtual uint64 getOpalTime(int procID) const{ return 0; } //called by Sequencer
- virtual uint64 getOpalTimestamp(int procID, int thread) const{
- cout << "mem/ruby/common/Driver.hh getOpalTimestamp " << endl;
- return 0; } // called by Sequencer
- virtual int inTransaction(int procID, int thread ) const{
- cout << "mem/ruby/common/Driver.hh inTransaction " << endl;
-return false; } //called by Sequencer
+
virtual void printDebug(){} //called by Sequencer
virtual void printStats(ostream& out) const = 0;
virtual void printConfig(ostream& out) const = 0;
- //virtual void abortCallback(NodeID proc){}
-
virtual integer_t readPhysicalMemory(int procID, physical_address_t address,
int len ){ ASSERT(0); return 0; }
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/common/Debug.hh"
+#include "mem/ruby/common/Driver.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/tester/Tester.hh"
#include "mem/ruby/init.hh"
}
//***************************************************************************
+
void init_variables( void )
{
- // allocate the "variable initialization" package
- ruby_initvar_obj = new initvar_t( "ruby", "../../../ruby/",
- global_default_param,
- &init_simulator,
- &init_generate_values );
+ // allocate the "variable initialization" package
+ ruby_initvar_obj = new initvar_t( "ruby", "../../../ruby/",
+ global_default_param,
+ &init_simulator,
+ &init_generate_values);
+}
+
+
+ /*
+void init_variables(const char* config_str )
+{
+ // allocate the "variable initialization" package
+ ruby_initvar_obj = new initvar_t( "ruby", "../../../ruby/",
+ config_str,
+ &init_simulator,
+ &init_generate_values );
}
+ */
void init_simulator()
{
cout << "Ruby initialization complete" << endl;
}
+void init_simulator(Driver* _driver)
+{
+ // Set things to NULL to make sure we don't de-reference them
+ // without a seg. fault.
+ g_system_ptr = NULL;
+ g_debug_ptr = NULL;
+ g_eventQueue_ptr = NULL;
+
+ cout << "Ruby Timing Mode" << endl;
+
+
+ g_debug_ptr = new Debug( DEBUG_FILTER_STRING,
+ DEBUG_VERBOSITY_STRING,
+ DEBUG_START_TIME,
+ DEBUG_OUTPUT_FILENAME );
+ RubyConfig::init();
+
+ cout << "Creating event queue..." << endl;
+ g_eventQueue_ptr = new RubyEventQueue;
+ cout << "Creating event queue done" << endl;
+
+ cout << "Creating system..." << endl;
+ cout << " Processors: " << RubyConfig::numberOfProcessors() << endl;
+
+ g_system_ptr = new RubySystem(_driver);
+ cout << "Creating system done" << endl;
+
+ cout << "Ruby initialization complete" << endl;
+}
+
void destroy_simulator()
{
cout << "Deleting system..." << endl;
| M5 in phase 1 integration, and possibly afterwards, too. |
+-------------------------------------------------------------------------*/
-extern "C"
+//dsm: superfluous
+/*extern "C"
int OnLoadRuby() {
init_variables();
return 0;
extern "C"
int OnUnloadRuby() {
- destroy_simulator();
- return 0;
-}
+ destroy_simulator();
+ return 0;
+}*/
/* I have to put it somewhere for now */
void tester_main(int argc, char **argv) {
#ifndef INIT_H
#define INIT_H
+class Driver;
+
extern void init_variables();
+//extern void init_variables(const char* config_str);
extern void init_simulator();
+extern void init_simulator(Driver* _driver);
extern void destroy_simulator();
#endif //INIT_H
+++ /dev/null
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-
-/*------------------------------------------------------------------------*/
-/* Macro declarations */
-/*------------------------------------------------------------------------*/
-
-#ifndef _MF_MEMORY_API_H_
-#define _MF_MEMORY_API_H_
-
-#ifdef SIMICS30
-#ifndef pa_t
-typedef physical_address_t pa_t;
-typedef physical_address_t la_t;
-#endif
-#endif
-
-/**
- * Defines types of memory requests
- */
-typedef enum OpalMemop {
- OPAL_LOAD,
- OPAL_STORE,
- OPAL_IFETCH,
- OPAL_ATOMIC,
-} OpalMemop_t;
-
-/*------------------------------------------------------------------------*/
-/* Class declaration(s) */
-/*------------------------------------------------------------------------*/
-
-/**
-* structure which provides an interface between ruby and opal.
-*/
-typedef struct mf_opal_api {
- /**
- * @name Methods
- */
- //@{
- /**
- * notify processor model that data from address address is available at proc
- */
- void (*hitCallback)( int cpuNumber, pa_t phys_address, OpalMemop_t type, int thread );
-
- /**
- * notify opal that ruby is loaded, or removed
- */
- void (*notifyCallback)( int status );
-
- /**
- * query for the number of instructions executed on a given processor.
- */
- integer_t (*getInstructionCount)( int cpuNumber );
-
- // for printing out debug info on crash
- void (*printDebug)();
-
- /** query Opal for the current time */
- uint64 (*getOpalTime)(int cpuNumber);
-
- /** For WATTCH power stats */
- // Called whenever L2 is accessed
- void (*incrementL2Access)(int cpuNumber);
- // Called whenever prefetcher is accessed
- void (*incrementPrefetcherAccess)(int cpuNumber, int num_prefetches, int isinstr);
-
- /* Called whenever there's an L2 miss */
- void (*notifyL2Miss)(int cpuNumber, physical_address_t physicalAddr, OpalMemop_t type, int tagexists);
-
- //@}
-} mf_opal_api_t;
-
-typedef struct mf_ruby_api {
- /**
- * @name Methods
- */
- //@{
- /**
- * Check to see if the system is ready for more requests
- */
- int (*isReady)( int cpuNumber, la_t logicalAddr, pa_t physicalAddr, OpalMemop_t typeOfRequest, int thread );
-
- /**
- * Make a 'mandatory' request to the memory hierarchy
- */
- void (*makeRequest)( int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
- int requestSize, OpalMemop_t typeOfRequest,
- la_t virtualPC, int isPriv, int thread);
-
- /**
- * Make a prefetch request to the memory hierarchy
- */
- void (*makePrefetch)( int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
- int requestSize, OpalMemop_t typeOfRequest,
- la_t virtualPC, int isPriv, int thread);
-
- /**
- * Ask the memory hierarchy for 'stale' data that can be used for speculation
- * Returns true (1) if the tag matches, false (0) if not.
- */
- int (*staleDataRequest)( int cpuNumber, pa_t physicalAddr,
- int requestSize, int8 *buffer );
-
- /**
- * Advance ruby's cycle time one step
- */
- void (*advanceTime)( void );
-
- /**
- * Get ruby's cycle time count.
- */
- uint64 (*getTime)( void );
-
- /** prints Ruby's outstanding request table */
- void (*printProgress)(int cpuNumber);
-
- /**
- * notify ruby that opal is loaded, or removed
- */
- void (*notifyCallback)( int status );
-
- // Returns the number of outstanding request
- int (*getNumberOutstanding)(int cpuNumber);
-
- // Returns the number of outstanding demand requests
- int (*getNumberOutstandingDemand)(int cpuNumber );
-
- // Returns the number of outstanding prefetch request
- int (*getNumberOutstandingPrefetch)(int cpuNumber );
-
-
- //@}
-} mf_ruby_api_t;
-
-#endif //_MF_MEMORY_API_H_
#include "mem/ruby/system/System.hh"
#include "mem/ruby/slicc_interface/AbstractChip.hh"
#include "mem/protocol/CacheMsg.hh"
+#include "mem/packet.hh"
TraceRecord::TraceRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time)
{
Sequencer* sequencer_ptr = chip_ptr->getSequencer((m_node_num/RubyConfig::numberofSMTThreads())%RubyConfig::numberOfProcsPerChip());
assert(sequencer_ptr != NULL);
- CacheMsg request(m_data_address, m_data_address, m_type, m_pc_address, AccessModeType_UserMode, 0, PrefetchBit_Yes, 0, Address(0), 0 /* only 1 SMT thread */);
+ Addr data_addr = m_data_address.getAddress();
+ Addr pc_addr = m_pc_address.getAddress();
+ Request request(0, data_addr, 0, Flags<unsigned int>(Request::PREFETCH), pc_addr, m_node_num, 0);
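+  // The PREFETCH flag mirrors the PrefetchBit_Yes of the CacheMsg this code replaces.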
+ MemCmd::Command command;
+ if (m_type == CacheRequestType_LD || m_type == CacheRequestType_IFETCH)
+ command = MemCmd::ReadReq;
+ else if (m_type == CacheRequestType_ST)
+ command = MemCmd::WriteReq;
+ else if (m_type == CacheRequestType_ATOMIC)
+ command = MemCmd::SwapReq; // TODO -- differentiate between atomic types
+ else
+ assert(false);
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
// Clear out the sequencer
while (!sequencer_ptr->empty()) {
g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 100);
}
- sequencer_ptr->makeRequest(request);
+ sequencer_ptr->makeRequest(&pkt);
// Clear out the sequencer
while (!sequencer_ptr->empty()) {
#include "mem/ruby/common/Global.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/recorder/CacheRecorder.hh"
+
+//dsm: PRUNED
+//#include "mem/ruby/recorder/CacheRecorder.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/DataBlock.hh"
int m_cache_num_sets;
int m_cache_num_set_bits;
int m_cache_assoc;
+
+ bool is_locked; // for LL/SC
};
// Output operator declaration
inline
void CacheMemory<ENTRY>::recordCacheContents(CacheRecorder& tr) const
{
- for (int i = 0; i < m_cache_num_sets; i++) {
+//dsm: Uses CacheRecorder, PRUNED
+assert(false);
+
+/* for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
AccessPermission perm = m_cache[i][j].m_Permission;
CacheRequestType request_type = CacheRequestType_NULL;
Address(0), request_type, m_replacementPolicy_ptr->getLastAccess(i, j));
}
}
- }
+ }*/
}
template<class ENTRY>
#include "mem/ruby/common/SubBlock.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/gems_common/Map.hh"
+#include "mem/packet.hh"
Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
m_chip_ptr = chip_ptr;
m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
+ m_packetTable_ptr = new Map<Address, Packet*>;
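+  // Maps the address of each outstanding request to its M5 Packet so hitCallback() can hand it back to the driver.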
+
for(int p=0; p < smt_threads; ++p){
m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
(type == CacheRequestType_ATOMIC);
if (TSO && write) {
- m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data);
+ m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data,
+ m_packetTable_ptr->lookup(request.getAddress()));
} else {
// Copy the correct bytes out of the cache line into the subblock
}
// Call into the Driver and let it read and/or modify the sub-block
- g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);
+ Packet* pkt = m_packetTable_ptr->lookup(request.getAddress());
+
+ // update data if this is a store/atomic
+
+ /*
+ if (pkt->req->isCondSwap()) {
+ L1Cache_Entry entry = m_L1Cache_vec[m_version]->lookup(Address(pkt->req->physAddr()));
+ DataBlk datablk = entry->getDataBlk();
+ uint8_t *orig_data = datablk.getArray();
+ if ( datablk.equal(pkt->req->getExtraData()) )
+ datablk->setArray(pkt->getData());
+ pkt->setData(orig_data);
+ }
+ */
+
+ g_system_ptr->getDriver()->hitCallback(pkt);
+ m_packetTable_ptr->remove(request.getAddress());
// If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
// (This is only triggered for the non-TSO case)
g_system_ptr->getDriver()->printDebug();
}
+//dsm: breaks build, delayed
// Returns true if the sequencer already has a load or store outstanding
bool
Sequencer::isReady(const Packet* pkt) const
Address(logical_addr), // Virtual Address
thread // SMT thread
);
- isReady(request);
+ return isReady(request);
}
bool
return true;
}
-// Called by Driver
+//dsm: breaks build, delayed
+// Called by Driver (Simics or Tester).
void
-Sequencer::makeRequest(const Packet* pkt, void* data)
+Sequencer::makeRequest(Packet* pkt)
{
int cpu_number = pkt->req->contextId();
la_t logical_addr = pkt->req->getVaddr();
pa_t physical_addr = pkt->req->getPaddr();
int request_size = pkt->getSize();
CacheRequestType type_of_request;
+ PrefetchBit prefetch;
+ bool write = false;
if ( pkt->req->isInstFetch() ) {
type_of_request = CacheRequestType_IFETCH;
} else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
type_of_request = CacheRequestType_ATOMIC;
+ write = true;
} else if ( pkt->isRead() ) {
type_of_request = CacheRequestType_LD;
} else if ( pkt->isWrite() ) {
type_of_request = CacheRequestType_ST;
+ write = true;
} else {
assert(false);
}
+ if (pkt->req->isPrefetch()) {
+ prefetch = PrefetchBit_Yes;
+ } else {
+ prefetch = PrefetchBit_No;
+ }
la_t virtual_pc = pkt->req->getPC();
int isPriv = false; // TODO: get permission data
int thread = pkt->req->threadId();
Address(virtual_pc),
access_mode, // User/supervisor mode
request_size, // Size in bytes of request
- PrefetchBit_No, // Not a prefetch
+ prefetch,
0, // Version number
Address(logical_addr), // Virtual Address
thread // SMT thread
);
- makeRequest(request);
-}
-
-void
-Sequencer::makeRequest(const CacheMsg& request)
-{
- bool write = (request.getType() == CacheRequestType_ST) ||
- (request.getType() == CacheRequestType_ATOMIC);
- if (TSO && (request.getPrefetch() == PrefetchBit_No) && write) {
+ if ( TSO && write && !pkt->req->isPrefetch() ) {
assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
- m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(request);
+ m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(pkt, request);
return;
}
- bool hit = doRequest(request);
+ m_packetTable_ptr->insert(Address( physical_addr ), pkt);
+ doRequest(request);
}
bool Sequencer::doRequest(const CacheMsg& request) {
#include "mem/protocol/GenericMachineType.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/gems_common/Map.hh"
-#include "mem/packet.hh"
class DataBlock;
class AbstractChip;
class CacheMsg;
class Address;
class MachineID;
+class Packet;
class Sequencer : public Consumer {
public:
void printDebug();
// called by Tester or Simics
- void makeRequest(const Packet* pkt, void* data);
- void makeRequest(const CacheMsg& request); // depricate this function
+ void makeRequest(Packet* pkt);
bool doRequest(const CacheMsg& request);
void issueRequest(const CacheMsg& request);
bool isReady(const Packet* pkt) const;
// One request table per SMT thread
Map<Address, CacheMsg>** m_writeRequestTable_ptr;
Map<Address, CacheMsg>** m_readRequestTable_ptr;
+
+ Map<Address, Packet*>* m_packetTable_ptr;
+
// Global outstanding request count, across all request tables
int m_outstanding_count;
bool m_deadlock_check_scheduled;
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/packet.hh"
// *** Begin Helper class ***
struct StoreBufferEntry {
// Handle an incoming store request, this method is responsible for
// calling hitCallback as needed
-void StoreBuffer::insertStore(const CacheMsg& request)
+void
+StoreBuffer::insertStore(Packet* pkt, const CacheMsg& request)
{
Address addr = request.getAddress();
CacheRequestType type = request.getType();
// Perform the hit-callback for the store
SubBlock subblock(addr, size);
if(type == CacheRequestType_ST) {
- g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID(), subblock, type, threadID);
+ g_system_ptr->getDriver()->hitCallback(pkt);
assert(subblock.getSize() != 0);
} else {
// wait to perform the hitCallback until later for Atomics
// Perform possible pre-fetch
if(!isEmpty()) {
- CacheMsg new_request = request;
- new_request.getPrefetch() = PrefetchBit_Yes;
- m_chip_ptr->getSequencer(m_version)->makeRequest(new_request);
+ Packet new_pkt(pkt);
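+    // new_pkt shares pkt's Request, so setting PREFETCH on pkt->req below also marks the copy as a prefetch.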
+ pkt->req->setFlags(Request::PREFETCH);
+ m_chip_ptr->getSequencer(m_version)->makeRequest(&new_pkt);
}
// Update the StoreCache
processHeadOfQueue();
}
-void StoreBuffer::callBack(const Address& addr, DataBlock& data)
+void StoreBuffer::callBack(const Address& addr, DataBlock& data, Packet* pkt)
{
DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "callBack");
DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
} else {
// We waited to perform the hitCallback until now for Atomics
peek().m_subblock.mergeFrom(data); // copy the correct bytes from DataBlock into the SubBlock for the Load part of the atomic Load/Store
- g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID(), peek().m_subblock, type, threadID);
+ g_system_ptr->getDriver()->hitCallback(pkt);
m_seen_atomic = false;
/// FIXME - record the time spent in the store buffer - split out ST vs ATOMIC
class SubBlock;
class StoreBufferEntry;
class AbstractChip;
+class Packet;
template <class TYPE> class Vector;
// Public Methods
void wakeup(); // Used only for deadlock detection
- void callBack(const Address& addr, DataBlock& data);
- void insertStore(const CacheMsg& request);
+ void callBack(const Address& addr, DataBlock& data, Packet* pkt);
+ void insertStore(Packet* pkt, const CacheMsg& request);
void updateSubBlock(SubBlock& sub_block) const { m_store_cache.update(sub_block); }
bool trySubBlock(const SubBlock& sub_block) const { assert(isReady()); return m_store_cache.check(sub_block); }
void print(ostream& out) const;
#include "mem/protocol/Chip.hh"
//#include "mem/ruby/recorder/Tracer.hh"
#include "mem/protocol/Protocol.hh"
-//#include "XactIsolationChecker.hh" // gem5:Arka for decomissioning of log_tm
-//#include "XactCommitArbiter.hh"
-//#include "XactVisualizer.hh"
-#include "mem/ruby/interfaces/M5Driver.hh"
RubySystem::RubySystem()
+{
+ init();
+ m_preinitialized_driver = false;
+ createDriver();
+
+  /* gem5:Binkert for decommissioning of tracer
+ m_tracer_ptr = new Tracer;
+ */
+
+  /* gem5:Arka for decommissioning of log_tm
+ if (XACT_MEMORY) {
+ m_xact_isolation_checker = new XactIsolationChecker;
+ m_xact_commit_arbiter = new XactCommitArbiter;
+ m_xact_visualizer = new XactVisualizer;
+ }
+*/
+}
+
+RubySystem::RubySystem(Driver* _driver)
+{
+ init();
+ m_preinitialized_driver = true;
+ m_driver_ptr = _driver;
+}
+
+RubySystem::~RubySystem()
+{
+ for (int i = 0; i < m_chip_vector.size(); i++) {
+ delete m_chip_vector[i];
+ }
+ if (!m_preinitialized_driver)
+ delete m_driver_ptr;
+ delete m_network_ptr;
+ delete m_profiler_ptr;
+  /* gem5:Binkert for decommissioning of tracer
+ delete m_tracer_ptr;
+ */
+}
+
+void RubySystem::init()
{
DEBUG_MSG(SYSTEM_COMP, MedPrio,"initializing");
}
}
#endif
+ DEBUG_MSG(SYSTEM_COMP, MedPrio,"finished initializing");
+ DEBUG_NEWLINE(SYSTEM_COMP, MedPrio);
+}
+void RubySystem::createDriver()
+{
if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
cerr << "Creating Synthetic Driver" << endl;
m_driver_ptr = new SyntheticDriver(this);
} else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
cerr << "Creating Deterministic Driver" << endl;
m_driver_ptr = new DeterministicDriver(this);
- } else {
- cerr << "Creating M5 Driver" << endl;
- m_driver_ptr = new M5Driver(this);
}
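+  // Under M5 there is no else branch: the driver (RubyMemory) is passed in through RubySystem(Driver*) instead.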
- /* gem5:Binkert for decomissiong of tracer
- m_tracer_ptr = new Tracer;
- */
-
- /* gem5:Arka for decomissiong of log_tm
- if (XACT_MEMORY) {
- m_xact_isolation_checker = new XactIsolationChecker;
- m_xact_commit_arbiter = new XactCommitArbiter;
- m_xact_visualizer = new XactVisualizer;
- }
-*/
- DEBUG_MSG(SYSTEM_COMP, MedPrio,"finished initializing");
- DEBUG_NEWLINE(SYSTEM_COMP, MedPrio);
-
-}
-
-RubySystem::~RubySystem()
-{
- for (int i = 0; i < m_chip_vector.size(); i++) {
- delete m_chip_vector[i];
- }
- delete m_driver_ptr;
- delete m_network_ptr;
- delete m_profiler_ptr;
- /* gem5:Binkert for decomissiong of tracer
- delete m_tracer_ptr;
- */
}
void RubySystem::printConfig(ostream& out) const
public:
// Constructors
RubySystem();
+ RubySystem(Driver* _driver); // used when driver is already instantiated (e.g. M5's RubyMem)
// Destructor
~RubySystem();
private:
// Private Methods
+ void init();
+ void createDriver();
// Private copy constructor and assignment operator
RubySystem(const RubySystem& obj);
Network* m_network_ptr;
Vector<AbstractChip*> m_chip_vector;
Profiler* m_profiler_ptr;
+ bool m_preinitialized_driver;
Driver* m_driver_ptr;
Tracer* m_tracer_ptr;
XactIsolationChecker *m_xact_isolation_checker;
#include "mem/ruby/system/System.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/protocol/Chip.hh"
+#include "mem/packet.hh"
Check::Check(const Address& address, const Address& pc)
{
} else {
type = CacheRequestType_ST;
}
+
+ Addr data_addr = m_address.getAddress();
+ Addr pc_addr = m_pc.getAddress();
+ Request request(0, data_addr, 0, Flags<unsigned int>(Request::PREFETCH), pc_addr, 0, 0);
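+  // Issued with the PREFETCH flag, matching the PrefetchBit_Yes of the CacheMsg this replaces.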
+ MemCmd::Command command;
+ if (type == CacheRequestType_IFETCH) {
+ command = MemCmd::ReadReq;
+ request.setFlags(Request::INST_FETCH);
+  } else if (type == CacheRequestType_LD) {
+ command = MemCmd::ReadReq;
+ } else if (type == CacheRequestType_ST) {
+ command = MemCmd::WriteReq;
+ } else if (type == CacheRequestType_ATOMIC) {
+ command = MemCmd::SwapReq; // TODO -- differentiate between atomic types
+ } else {
+ assert(false);
+ }
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
assert(targetSequencer_ptr != NULL);
- CacheMsg request(m_address, m_address, type, m_pc, m_access_mode, 0, PrefetchBit_Yes, 0, Address(0), 0 /* only 1 SMT thread */);
- if (targetSequencer_ptr->isReady(request)) {
- targetSequencer_ptr->makeRequest(request);
+ if (targetSequencer_ptr->isReady(&pkt)) {
+ targetSequencer_ptr->makeRequest(&pkt);
}
}
type = CacheRequestType_ATOMIC;
}
- CacheMsg request(Address(m_address.getAddress()+m_store_count), Address(m_address.getAddress()+m_store_count), type, m_pc, m_access_mode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */);
+ Addr data_addr = m_address.getAddress()+m_store_count;
+ Addr pc_addr = m_pc.getAddress();
+ Request request(0, data_addr, 1, Flags<unsigned int>(), pc_addr, 0, 0);
+ MemCmd::Command command;
+ if (type == CacheRequestType_IFETCH) {
+ command = MemCmd::ReadReq;
+ request.setFlags(Request::INST_FETCH);
+  } else if (type == CacheRequestType_LD) {
+ command = MemCmd::ReadReq;
+ } else if (type == CacheRequestType_ST) {
+ command = MemCmd::WriteReq;
+ } else if (type == CacheRequestType_ATOMIC) {
+ command = MemCmd::SwapReq; // TODO -- differentiate between atomic types
+ } else {
+ assert(false);
+ }
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
Sequencer* sequencer_ptr = initiatingSequencer();
- if (sequencer_ptr->isReady(request) == false) {
+ if (sequencer_ptr->isReady(&pkt) == false) {
DEBUG_MSG(TESTER_COMP, MedPrio, "failed to initiate action - sequencer not ready\n");
} else {
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating action - successful\n");
DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
m_status = TesterStatus_Action_Pending;
- sequencer_ptr->makeRequest(request);
+
+ sequencer_ptr->makeRequest(&pkt);
}
DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
}
type = CacheRequestType_IFETCH;
}
- CacheMsg request(m_address, m_address, type, m_pc, m_access_mode, CHECK_SIZE, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */);
+
+  Addr data_addr = m_address.getAddress();
+ Addr pc_addr = m_pc.getAddress();
+ Request request(0, data_addr, CHECK_SIZE, Flags<unsigned int>(), pc_addr, 0, 0);
+ MemCmd::Command command;
+ if (type == CacheRequestType_IFETCH) {
+ command = MemCmd::ReadReq;
+ request.setFlags(Request::INST_FETCH);
+  } else if (type == CacheRequestType_LD) {
+ command = MemCmd::ReadReq;
+ } else if (type == CacheRequestType_ST) {
+ command = MemCmd::WriteReq;
+ } else if (type == CacheRequestType_ATOMIC) {
+ command = MemCmd::SwapReq; // TODO -- differentiate between atomic types
+ } else {
+ assert(false);
+ }
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
Sequencer* sequencer_ptr = initiatingSequencer();
- if (sequencer_ptr->isReady(request) == false) {
+ if (sequencer_ptr->isReady(&pkt) == false) {
DEBUG_MSG(TESTER_COMP, MedPrio, "failed to initiate check - sequencer not ready\n");
} else {
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating check - successful\n");
DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
m_status = TesterStatus_Check_Pending;
- sequencer_ptr->makeRequest(request);
+
+ sequencer_ptr->makeRequest(&pkt);
}
DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
}
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/tester/DeterministicDriver.hh"
#include "mem/protocol/Chip.hh"
+#include "mem/packet.hh"
DetermGETXGenerator::DetermGETXGenerator(NodeID node, DeterministicDriver& driver) :
m_driver(driver)
void DetermGETXGenerator::initiateStore()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Store");
- sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */));
+
+ Addr data_addr = m_address.getAddress();
+ Request request(0, data_addr, 1, Flags<unsigned int>(), 3, 0, 0);
+ MemCmd::Command command;
+ command = MemCmd::WriteReq;
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
+ sequencer()->makeRequest(&pkt);
}
Sequencer* DetermGETXGenerator::sequencer() const
void DetermInvGenerator::initiateLoad()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Load");
- sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_LD, Address(1), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */));
+
+ Addr data_addr = m_address.getAddress();
+ Request request(0, data_addr, 1, Flags<unsigned int>(), 1, 0, 0);
+ MemCmd::Command command;
+ command = MemCmd::ReadReq;
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
+ sequencer()->makeRequest(&pkt);
+
}
void DetermInvGenerator::initiateStore()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Store");
- sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */));
+
+ Addr data_addr = m_address.getAddress();
+ Request request(0, data_addr, 1, Flags<unsigned int>(), 3, 0, 0);
+ MemCmd::Command command;
+ command = MemCmd::WriteReq;
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
+ sequencer()->makeRequest(&pkt);
}
Sequencer* DetermInvGenerator::sequencer() const
void DetermSeriesGETSGenerator::initiateLoad()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Load");
- sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_IFETCH, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */));
+
+ Addr data_addr = m_address.getAddress();
+ Request request(0, data_addr, 1, Flags<unsigned int>(), 3, 0, 0);
+ MemCmd::Command command;
+ command = MemCmd::ReadReq;
+ request.setFlags(Request::INST_FETCH);
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
+ sequencer()->makeRequest(&pkt);
}
Sequencer* DetermSeriesGETSGenerator::sequencer() const
#include "mem/ruby/tester/DetermSeriesGETSGenerator.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/protocol/Chip.hh"
+#include "mem/packet.hh"
DeterministicDriver::DeterministicDriver(RubySystem* sys_ptr)
{
}
}
-void DeterministicDriver::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread)
+void
+DeterministicDriver::hitCallback(Packet * pkt)
{
- DEBUG_EXPR(TESTER_COMP, MedPrio, data);
-
+ NodeID proc = pkt->req->contextId();
+ SubBlock data(Address(pkt->getAddr()), pkt->req->getSize());
+ if (pkt->hasData()) {
+ for (int i = 0; i < pkt->req->getSize(); i++) {
+ data.setByte(i, *(pkt->getPtr<uint8>()+i));
+ }
+ }
m_generator_vector[proc]->performCallback(proc, data);
-
- // Mark that we made progress
m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
}
class RubySystem;
class SpecifiedGenerator;
+class Packet;
class DeterministicDriver : public Driver, public Consumer {
public:
void recordLoadLatency(Time time);
void recordStoreLatency(Time time);
- void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
+ void hitCallback(Packet* pkt);
void wakeup();
void printStats(ostream& out) const;
void clearStats() {}
void RequestGenerator::initiateTest()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Test");
- sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_LD, Address(1), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */));
+
+ Addr data_addr = m_address.getAddress();
+ Request request(0, data_addr, 1, Flags<unsigned int>(), 1, 0, 0);
+ MemCmd::Command command;
+ command = MemCmd::ReadReq;
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
+ sequencer()->makeRequest(&pkt);
}
void RequestGenerator::initiateSwap()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Swap");
- sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ATOMIC, Address(2), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */));
+
+ Addr data_addr = m_address.getAddress();
+ Request request(0, data_addr, 1, Flags<unsigned int>(), 2, 0, 0);
+ MemCmd::Command command;
+ command = MemCmd::SwapReq;
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
+ sequencer()->makeRequest(&pkt);
}
void RequestGenerator::initiateRelease()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Release");
- sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, Address(0), 0 /* only 1 SMT thread */));
+
+ Addr data_addr = m_address.getAddress();
+ Request request(0, data_addr, 1, Flags<unsigned int>(), 3, 0, 0);
+ MemCmd::Command command;
+ command = MemCmd::WriteReq;
+
+ Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+
+ sequencer()->makeRequest(&pkt);
}
Sequencer* RequestGenerator::sequencer() const
}
}
-void SyntheticDriver::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread)
+void
+SyntheticDriver::hitCallback(Packet * pkt)
{
- DEBUG_EXPR(TESTER_COMP, MedPrio, data);
- //cout << " " << proc << " in S.D. hitCallback" << endl;
- if(XACT_MEMORY){
- //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
- //reqGen->performCallback(proc, data);
- } else {
- m_request_generator_vector[proc]->performCallback(proc, data);
+ NodeID proc = pkt->req->contextId();
+ SubBlock data(Address(pkt->getAddr()), pkt->req->getSize());
+ if (pkt->hasData()) {
+ for (int i = 0; i < pkt->req->getSize(); i++) {
+ data.setByte(i, *(pkt->getPtr<uint8>()+i));
+ }
}
-
- // Mark that we made progress
+ m_request_generator_vector[proc]->performCallback(proc, data);
m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
}
void recordSwapLatency(Time time);
void recordReleaseLatency(Time time);
- void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
+ void hitCallback(Packet* pkt);
void conflictCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) {assert(0);}
void abortCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
void wakeup();
#include "mem/ruby/tester/main.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/ruby/tester/test_framework.hh"
+//#include "mem/ruby/tester/test_framework.hh"
// *******************
// *** tester main ***
int main(int argc, char *argv[])
{
- tester_main(argc, argv);
+ //dsm: PRUNED
+ //tester_main(argc, argv);
}
--- /dev/null
+/*
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Daniel Sanchez
+ */
+
+
+#include "arch/isa_traits.hh"
+#include "mem/rubymem.hh"
+#include "sim/eventq.hh"
+#include "sim/host.hh"
+#include "base/output.hh"
+
+// Ruby includes
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/system/Sequencer.hh"
+#include "mem/ruby/init.hh"
+#include "mem/ruby/common/Debug.hh"
+
+#include "sim/sim_exit.hh"
+
+#include <iostream>
+#include <fstream>
+
+using namespace std;
+using namespace TheISA;
+
+RubyMemory::RubyMemory(const Params *p)
+ : PhysicalMemory(p)
+{
+ config_file = p->config_file;
+ config_options = p->config_options;
+ stats_file = p->stats_file;
+ num_cpus = p->num_cpus;
+ ruby_clock = p->clock;
+ ruby_phase = p->phase;
+
+ debug = p->debug;
+ debug_file = p->debug_file;
+}
+
+void
+RubyMemory::init()
+{
+ init_variables();
+ g_NUM_PROCESSORS = num_cpus;
+
+ init_simulator(this);
+
+ if (debug) {
+ g_debug_ptr->setVerbosityString("high");
+ g_debug_ptr->setDebugTime(1);
+ if (debug_file != "") {
+      g_debug_ptr->setDebugOutputFile(debug_file.c_str());
+ }
+ }
+
+ //You may want to set some other options...
+ //g_debug_ptr->setVerbosityString("med");
+ //g_debug_ptr->setFilterString("lsNqST");
+ //g_debug_ptr->setFilterString("lsNST");
+ //g_debug_ptr->setDebugTime(1);
+ //g_debug_ptr->setDebugOutputFile("ruby.debug");
+
+
+ g_system_ptr->clearStats();
+
+ if (ports.size() == 0) {
+ fatal("RubyMemory object %s is unconnected!", name());
+ }
+
+ for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
+ if (*pi)
+ (*pi)->sendStatusChange(Port::RangeChange);
+ }
+
+ //Print stats at exit
+ RubyExitCallback* rc = new RubyExitCallback(this);
+ registerExitCallback(rc);
+
+  // Schedule the RubyEvent; it reschedules itself to keep advancing Ruby cycles
+ rubyTickEvent = new RubyEvent(this);
+ schedule(rubyTickEvent, curTick + ruby_clock + ruby_phase);
+}
+
+//called by rubyTickEvent
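+// Advances Ruby by one cycle each call; the event then reschedules itself ruby_clock ticks later.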
+void RubyMemory::tick() {
+ g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 1);
+  schedule(rubyTickEvent, curTick + ruby_clock); //dsm: do not add ruby_phase here; the phase offset is applied only to the first tick, scheduled in init()
+}
+
+
+RubyMemory::~RubyMemory() {
+ delete g_system_ptr;
+}
+
+void
+RubyMemory::hitCallback(Packet* pkt)
+{
+ RubyMemoryPort* port = m_packet_to_port_map[pkt];
+ assert(port != NULL);
+ m_packet_to_port_map.erase(pkt);
+
+ DPRINTF(MemoryAccess, "Hit callback\n");
+
+ bool needsResponse = pkt->needsResponse();
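+    // Ruby has finished the timing simulation for this access; now let PhysicalMemory actually read/write the backing store and build the response.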
+ doAtomicAccess(pkt);
+
+ // turn packet around to go back to requester if response expected
+ if (needsResponse) {
+ // recvAtomic() should already have turned packet into
+ // atomic response
+ assert(pkt->isResponse());
+ DPRINTF(MemoryAccess, "Sending packet back over port\n");
+ port->sendTiming(pkt);
+ } else {
+ delete pkt;
+ }
+ DPRINTF(MemoryAccess, "Hit callback done!\n");
+}
+
+Port *
+RubyMemory::getPort(const std::string &if_name, int idx)
+{
+ // Accept request for "functional" port for backwards compatibility
+ // with places where this function is called from C++. I'd prefer
+ // to move all these into Python someday.
+ if (if_name == "functional") {
+ return new RubyMemoryPort(csprintf("%s-functional", name()), this);
+ }
+
+ if (if_name != "port") {
+ panic("RubyMemory::getPort: unknown port %s requested", if_name);
+ }
+
+ if (idx >= ports.size()) {
+ ports.resize(idx+1);
+ }
+
+ if (ports[idx] != NULL) {
+ panic("RubyMemory::getPort: port %d already assigned", idx);
+ }
+
+ RubyMemoryPort *port =
+ new RubyMemoryPort(csprintf("%s-port%d", name(), idx), this);
+
+ ports[idx] = port;
+ return port;
+}
+
+RubyMemory::RubyMemoryPort::RubyMemoryPort(const std::string &_name,
+ RubyMemory *_memory)
+ : PhysicalMemory::MemoryPort::MemoryPort(_name, _memory)
+{
+ ruby_mem = _memory;
+}
+
+bool
+RubyMemory::RubyMemoryPort::recvTiming(PacketPtr pkt)
+{
+ DPRINTF(MemoryAccess, "Timing access caught\n");
+
+ //dsm: based on SimpleTimingPort::recvTiming(pkt);
+
+ // If the device is only a slave, it should only be sending
+ // responses, which should never get nacked. There used to be
+    // code to handle nacks here, but I'm pretty sure it didn't work
+ // correctly with the drain code, so that would need to be fixed
+ // if we ever added it back.
+ assert(pkt->isRequest());
+
+ if (pkt->memInhibitAsserted()) {
+ warn("memInhibitAsserted???");
+ // snooper will supply based on copy of packet
+ // still target's responsibility to delete packet
+ delete pkt;
+ return true;
+ }
+
+ ruby_mem->m_packet_to_port_map[pkt] = this;
+
+ Sequencer* sequencer = g_system_ptr->getSequencer(pkt->req->contextId());
+
+ if ( ! sequencer->isReady(pkt) ) {
+ DPRINTF(MemoryAccess, "Sequencer isn't ready yet!!\n");
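+      // Returning false tells the CPU-side port that the request was not accepted and must be retried.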
+ return false;
+ }
+
+ DPRINTF(MemoryAccess, "Issuing makeRequest\n");
+
+ sequencer->makeRequest(pkt);
+ return true;
+}
+
+void
+RubyMemory::RubyMemoryPort::sendTiming(PacketPtr pkt)
+{
+ schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
+}
+
+void RubyMemory::printConfigStats()
+{
+ std::ostream *os = simout.create(stats_file);
+ g_system_ptr->printConfig(*os);
+ *os << endl;
+ g_system_ptr->printStats(*os);
+}
+
+
+// These functions appear to be called by RubySystem itself. If they in turn
+// called back into RubySystem to do their intended work, we would end up in an
+// infinite loop, so they are left as no-ops for now.
+// FIXME: what is the purpose of these overrides?
+void RubyMemory::printStats(std::ostream & out) const {
+ //g_system_ptr->printConfig(out);
+}
+
+void RubyMemory::clearStats() {
+ //g_system_ptr->clearStats();
+}
+
+void RubyMemory::printConfig(std::ostream & out) const {
+ //g_system_ptr->printConfig(out);
+}
+
+
+//Python-interface code
+RubyMemory *
+RubyMemoryParams::create()
+{
+ return new RubyMemory(this);
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Daniel Sanchez
+ */
+
+#ifndef __RUBY_MEMORY_HH__
+#define __RUBY_MEMORY_HH__
+
+#include <map>
+
+#include "mem/physical.hh"
+#include "params/RubyMemory.hh"
+#include "base/callback.hh"
+#include "mem/ruby/common/Driver.hh"
+
+class RubyMemory : public PhysicalMemory, public Driver
+{
+ class RubyMemoryPort : public MemoryPort
+ {
+ RubyMemory* ruby_mem;
+
+ public:
+ RubyMemoryPort(const std::string &_name, RubyMemory *_memory);
+ void sendTiming(PacketPtr pkt);
+
+ protected:
+ virtual bool recvTiming(PacketPtr pkt);
+ };
+
+ class RubyEvent : public Event
+ {
+ RubyMemory *ruby_ptr;
+ public:
+ RubyEvent(RubyMemory *p)
+ : Event(), ruby_ptr(p) {}
+
+ virtual void process() { ruby_ptr->tick(); }
+
+ virtual const char *description() const { return "ruby tick"; }
+ };
+
+
+ private:
+ // prevent copying of a RubyMemory object
+ RubyMemory(const RubyMemory &specmem);
+ const RubyMemory &operator=(const RubyMemory &specmem);
+
+ RubyEvent* rubyTickEvent;
+
+ public:
+ typedef RubyMemoryParams Params;
+ RubyMemory(const Params *p);
+ virtual ~RubyMemory();
+
+ public:
+ virtual Port *getPort(const std::string &if_name, int idx = -1);
+ void virtual init();
+
+ //Ruby-related specifics
+ void printConfigStats(); //dsm: Maybe this function should disappear once the configuration options change & M5 determines the stats file to use
+
+ void hitCallback(Packet* pkt); // called by the Ruby sequencer
+
+ void printStats(std::ostream & out) const;
+ void clearStats();
+ void printConfig(std::ostream & out) const;
+
+ void tick();
+
+ private:
+ //Parameters passed
+ std::string config_file, config_options, stats_file, debug_file;
+ bool debug;
+ int num_cpus;
+ Tick ruby_clock, ruby_phase;
+
+ std::map<Packet*, RubyMemoryPort*> m_packet_to_port_map;
+};
+
+class RubyExitCallback : public Callback
+{
+ private:
+ RubyMemory* ruby;
+
+ public:
+ /**
+ * virtualize the destructor to make sure that the correct one
+ * gets called.
+ */
+
+ virtual ~RubyExitCallback() {};
+
+ RubyExitCallback(RubyMemory* rm) {ruby=rm;};
+
+ /**
+ * virtual process function that is invoked when the callback
+ * queue is executed.
+ */
+ virtual void process() {ruby->printConfigStats(); /*delete ruby; was doing double delete...*/};
+};
+
+
+#endif //__RUBY_MEMORY_HH__
+