ruby: reverts to changeset: bf82f1f7b040
authorNilay Vaish <nilay@cs.wisc.edu>
Wed, 19 Aug 2015 15:02:01 +0000 (10:02 -0500)
committerNilay Vaish <nilay@cs.wisc.edu>
Wed, 19 Aug 2015 15:02:01 +0000 (10:02 -0500)
81 files changed:
configs/ruby/Ruby.py
src/cpu/testers/directedtest/RubyDirectedTester.hh
src/cpu/testers/rubytest/RubyTester.hh
src/mem/protocol/MESI_Three_Level-L0cache.sm
src/mem/protocol/MESI_Three_Level-L1cache.sm
src/mem/protocol/MESI_Two_Level-L1cache.sm
src/mem/protocol/MESI_Two_Level-L2cache.sm
src/mem/protocol/MESI_Two_Level-dir.sm
src/mem/protocol/MI_example-cache.sm
src/mem/protocol/MI_example-dir.sm
src/mem/protocol/MOESI_CMP_directory-L1cache.sm
src/mem/protocol/MOESI_CMP_directory-L2cache.sm
src/mem/protocol/MOESI_CMP_directory-dir.sm
src/mem/protocol/MOESI_CMP_directory-dma.sm
src/mem/protocol/MOESI_CMP_token-L1cache.sm
src/mem/protocol/MOESI_CMP_token-dir.sm
src/mem/protocol/MOESI_hammer-cache.sm
src/mem/protocol/MOESI_hammer-dir.sm
src/mem/protocol/RubySlicc_Types.sm
src/mem/ruby/common/DataBlock.hh
src/mem/ruby/common/Histogram.cc
src/mem/ruby/common/Histogram.hh
src/mem/ruby/common/SubBlock.cc
src/mem/ruby/common/SubBlock.hh
src/mem/ruby/common/TypeDefines.hh
src/mem/ruby/filters/H3BloomFilter.cc
src/mem/ruby/filters/H3BloomFilter.hh
src/mem/ruby/filters/MultiBitSelBloomFilter.cc
src/mem/ruby/filters/MultiBitSelBloomFilter.hh
src/mem/ruby/network/MessageBuffer.cc
src/mem/ruby/network/MessageBuffer.hh
src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
src/mem/ruby/network/garnet/flexible-pipeline/flit.cc
src/mem/ruby/network/garnet/flexible-pipeline/flit.hh
src/mem/ruby/network/simple/PerfectSwitch.cc
src/mem/ruby/network/simple/PerfectSwitch.hh
src/mem/ruby/network/simple/SimpleNetwork.cc
src/mem/ruby/network/simple/SimpleNetwork.hh
src/mem/ruby/network/simple/Switch.cc
src/mem/ruby/network/simple/Throttle.cc
src/mem/ruby/network/simple/Throttle.hh
src/mem/ruby/profiler/AccessTraceForAddress.hh
src/mem/ruby/profiler/AddressProfiler.cc
src/mem/ruby/profiler/AddressProfiler.hh
src/mem/ruby/profiler/Profiler.cc
src/mem/ruby/profiler/Profiler.hh
src/mem/ruby/profiler/StoreTrace.cc
src/mem/ruby/profiler/StoreTrace.hh
src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
src/mem/ruby/slicc_interface/AbstractController.hh
src/mem/ruby/structures/AbstractReplacementPolicy.cc
src/mem/ruby/structures/AbstractReplacementPolicy.hh
src/mem/ruby/structures/BankedArray.cc
src/mem/ruby/structures/BankedArray.hh
src/mem/ruby/structures/CacheMemory.cc
src/mem/ruby/structures/CacheMemory.hh
src/mem/ruby/structures/DirectoryMemory.cc
src/mem/ruby/structures/DirectoryMemory.hh
src/mem/ruby/structures/LRUPolicy.cc
src/mem/ruby/structures/LRUPolicy.hh
src/mem/ruby/structures/PseudoLRUPolicy.cc
src/mem/ruby/structures/PseudoLRUPolicy.hh
src/mem/ruby/structures/RubyMemoryControl.cc
src/mem/ruby/structures/RubyMemoryControl.hh
src/mem/ruby/system/CacheRecorder.cc
src/mem/ruby/system/CacheRecorder.hh
src/mem/ruby/system/RubySystem.py
src/mem/ruby/system/Sequencer.cc
src/mem/ruby/system/System.cc
src/mem/ruby/system/System.hh
src/mem/slicc/ast/EnumDeclAST.py
src/mem/slicc/ast/FormalParamAST.py
src/mem/slicc/ast/FuncCallExprAST.py
src/mem/slicc/ast/FuncDeclAST.py
src/mem/slicc/ast/InPortDeclAST.py
src/mem/slicc/ast/MethodCallExprAST.py
src/mem/slicc/ast/StateDeclAST.py
src/mem/slicc/parser.py
src/mem/slicc/symbols/Func.py
src/mem/slicc/symbols/StateMachine.py

index 6d78dd89d609aa6dceccee7d1557101ec9e7d47f..44dbb925fac20295109ddce081853e2166133f65 100644 (file)
@@ -82,6 +82,9 @@ def define_options(parser):
     parser.add_option("--recycle-latency", type="int", default=10,
                       help="Recycle latency for ruby controller input buffers")
 
+    parser.add_option("--random_seed", type="int", default=1234,
+                      help="Used for seeding the random number generator")
+
     protocol = buildEnv['PROTOCOL']
     exec "import %s" % protocol
     eval("%s.define_options(parser)" % protocol)
@@ -231,9 +234,9 @@ def create_system(options, full_system, system, piobus = None, dma_ports = []):
             if buildEnv['TARGET_ISA'] == "x86":
                 cpu_seq.pio_slave_port = piobus.master
 
-    ruby.number_of_virtual_networks = ruby.network.number_of_virtual_networks
     ruby._cpu_ports = cpu_sequencers
     ruby.num_of_sequencers = len(cpu_sequencers)
+    ruby.random_seed    = options.random_seed
 
     # Create a backing copy of physical memory in case required
     if options.access_backing_store:
index 74a89117864fe743bdca0469578d973dd19f761f..2a1e7fc1fbb9a1963aef1fd0606982848f4c4bcb 100644 (file)
@@ -109,9 +109,9 @@ class RubyDirectedTester : public MemObject
     RubyDirectedTester(const RubyDirectedTester& obj);
     RubyDirectedTester& operator=(const RubyDirectedTester& obj);
 
-    uint64_t m_requests_completed;
+    uint64 m_requests_completed;
     std::vector<MasterPort*> ports;
-    uint64_t m_requests_to_complete;
+    uint64 m_requests_to_complete;
     DirectedGenerator* generator;
 };
 
index 94a982e3271efa35321a28e3ae840a9d66bf3918..c9f0b8dfccaaf497c338112c755ec9c188c75c10 100644 (file)
@@ -143,10 +143,10 @@ class RubyTester : public MemObject
     std::vector<Cycles> m_last_progress_vector;
 
     int m_num_cpus;
-    uint64_t m_checks_completed;
+    uint64 m_checks_completed;
     std::vector<MasterPort*> writePorts;
     std::vector<MasterPort*> readPorts;
-    uint64_t m_checks_to_complete;
+    uint64 m_checks_to_complete;
     int m_deadlock_threshold;
     int m_num_writers;
     int m_num_readers;
index fb9e762daff66e59f4496508fe1ca2fc278cfa99..8e44766ea052c352426b3fc7e50d4ea6b56a99f7 100644 (file)
@@ -145,22 +145,22 @@ machine(L0Cache, "MESI Directory L0 Cache")
 
   // inclusive cache returns L0 entries only
   Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
-    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
+    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
     if(is_valid(Dcache_entry)) {
       return Dcache_entry;
     }
 
-    Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
+    Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
     return Icache_entry;
   }
 
   Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
-    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
+    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
     return Dcache_entry;
   }
 
   Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
-    Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
+    Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
     return Icache_entry;
   }
 
@@ -189,7 +189,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
       return L0Cache_State_to_permission(tbe.TBEState);
@@ -206,7 +206,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -217,7 +217,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
@@ -260,7 +260,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
         assert(in_msg.Dest == machineID);
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
             trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -301,7 +301,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
           if (is_valid(Icache_entry)) {
             // The tag matches for the L0, so the L0 asks the L2 for it.
             trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                    Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                    Icache_entry, TBEs[in_msg.LineAddress]);
           } else {
 
             // Check to see if it is in the OTHER L0
@@ -309,19 +309,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
             if (is_valid(Dcache_entry)) {
               // The block is in the wrong L0, put the request on the queue to the shared L2
               trigger(Event:L0_Replacement, in_msg.LineAddress,
-                      Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                      Dcache_entry, TBEs[in_msg.LineAddress]);
             }
 
             if (Icache.cacheAvail(in_msg.LineAddress)) {
               // L0 does't have the line, but we have space for it
               // in the L0 so let's see if the L2 has it
               trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                      Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                      Icache_entry, TBEs[in_msg.LineAddress]);
             } else {
               // No room in the L0, so we need to make room in the L0
               trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
                       getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
-                      TBEs.lookup(Icache.cacheProbe(in_msg.LineAddress)));
+                      TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
             }
           }
         } else {
@@ -331,7 +331,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
           if (is_valid(Dcache_entry)) {
             // The tag matches for the L0, so the L0 ask the L1 for it
             trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                    Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                    Dcache_entry, TBEs[in_msg.LineAddress]);
           } else {
 
             // Check to see if it is in the OTHER L0
@@ -339,19 +339,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
             if (is_valid(Icache_entry)) {
               // The block is in the wrong L0, put the request on the queue to the private L1
               trigger(Event:L0_Replacement, in_msg.LineAddress,
-                      Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                      Icache_entry, TBEs[in_msg.LineAddress]);
             }
 
             if (Dcache.cacheAvail(in_msg.LineAddress)) {
               // L1 does't have the line, but we have space for it
               // in the L0 let's see if the L1 has it
               trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                      Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                      Dcache_entry, TBEs[in_msg.LineAddress]);
             } else {
               // No room in the L1, so we need to make room in the L0
               trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
                       getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
-                      TBEs.lookup(Dcache.cacheProbe(in_msg.LineAddress)));
+                      TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
             }
           }
         }
@@ -459,38 +459,21 @@ machine(L0Cache, "MESI Directory L0 Cache")
     }
   }
 
-  action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
+  action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    Dcache.setMRU(cache_entry);
     sequencer.readCallback(address, cache_entry.DataBlk);
   }
 
-  action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
+  action(hx_load_hit, "hx", desc="If not prefetch, notify sequencer the load completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    Icache.setMRU(cache_entry);
-    sequencer.readCallback(address, cache_entry.DataBlk);
-  }
-
-  action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
-    assert(is_valid(cache_entry));
-    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    Dcache.setMRU(cache_entry);
-    sequencer.readCallback(address, cache_entry.DataBlk, true);
-  }
-
-  action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
-    assert(is_valid(cache_entry));
-    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    Icache.setMRU(cache_entry);
     sequencer.readCallback(address, cache_entry.DataBlk, true);
   }
 
   action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    Dcache.setMRU(cache_entry);
     sequencer.writeCallback(address, cache_entry.DataBlk);
     cache_entry.Dirty := true;
   }
@@ -498,7 +481,6 @@ machine(L0Cache, "MESI Directory L0 Cache")
   action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    Dcache.setMRU(cache_entry);
     sequencer.writeCallback(address, cache_entry.DataBlk, true);
     cache_entry.Dirty := true;
   }
@@ -507,7 +489,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
     check_allocate(TBEs);
     assert(is_valid(cache_entry));
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
     tbe.Dirty := cache_entry.Dirty;
     tbe.DataBlk := cache_entry.DataBlk;
   }
@@ -643,7 +625,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
   }
 
   transition({S,E,M}, Ifetch) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileInstHit;
     k_popMandatoryQueue;
   }
@@ -730,7 +712,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
 
   transition(Inst_IS, Data, S) {
     u_writeInstToCache;
-    hx_ifetch_hit;
+    hx_load_hit;
     s_deallocateTBE;
     o_popIncomingResponseQueue;
     kd_wakeUpDependents;
@@ -738,7 +720,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
 
   transition(Inst_IS, Data_Exclusive, E) {
     u_writeInstToCache;
-    hx_ifetch_hit;
+    hx_load_hit;
     s_deallocateTBE;
     o_popIncomingResponseQueue;
     kd_wakeUpDependents;
index 9bab20deffb874c7a11f66df6df6ae6e760aa07e..6c8df8d754d7879a9a656d78f49ab7eccc130bdd 100644 (file)
@@ -161,7 +161,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   // inclusive cache returns L1 entries only
   Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
-    Entry cache_entry := static_cast(Entry, "pointer", cache.lookup(addr));
+    Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
     return cache_entry;
   }
 
@@ -186,7 +186,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
       return L1Cache_State_to_permission(tbe.TBEState);
@@ -203,7 +203,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -214,7 +214,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
@@ -271,7 +271,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
         assert(in_msg.Destination.isElement(machineID));
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
           trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -307,7 +307,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
       peek(requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if (in_msg.Type == CoherenceRequestType:INV) {
             if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
@@ -343,7 +343,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
     if (messageBufferFromL0_in.isReady()) {
       peek(messageBufferFromL0_in, CoherenceMsg) {
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if(in_msg.Class == CoherenceClass:INV_DATA) {
             trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
@@ -363,7 +363,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
                     // No room in the L1, so we need to make room in the L1
                     Entry victim_entry :=
                         getCacheEntry(cache.cacheProbe(in_msg.addr));
-                    TBE victim_tbe := TBEs.lookup(cache.cacheProbe(in_msg.addr));
+                    TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];
 
                     if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
                         trigger(Event:L0_Invalidate_Own,
@@ -628,7 +628,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
     check_allocate(TBEs);
     assert(is_valid(cache_entry));
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
     tbe.Dirty := cache_entry.Dirty;
     tbe.DataBlk := cache_entry.DataBlk;
   }
index f4978050dc4a56e2b5798924c8e9d5f86c6070b5..184f735c7dc4526dee53105fbcfcdfb6fca8b36a 100644 (file)
@@ -164,22 +164,22 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   // inclusive cache returns L1 entries only
   Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
-    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
     if(is_valid(L1Dcache_entry)) {
       return L1Dcache_entry;
     }
 
-    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
     return L1Icache_entry;
   }
 
   Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
-    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
     return L1Dcache_entry;
   }
 
   Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
-    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
     return L1Icache_entry;
   }
 
@@ -208,7 +208,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
       return L1Cache_State_to_permission(tbe.TBEState);
@@ -225,7 +225,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -236,7 +236,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
@@ -305,7 +305,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
                       // cache. We should drop this request.
                       trigger(prefetch_request_type_to_event(in_msg.Type),
                               in_msg.LineAddress,
-                              L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                   }
 
                   // Check to see if it is in the OTHER L1
@@ -315,7 +315,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
                       // this request.
                       trigger(prefetch_request_type_to_event(in_msg.Type),
                               in_msg.LineAddress,
-                              L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                   }
 
                   if (L1Icache.cacheAvail(in_msg.LineAddress)) {
@@ -323,13 +323,13 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
                       // in the L1 so let's see if the L2 has it
                       trigger(prefetch_request_type_to_event(in_msg.Type),
                               in_msg.LineAddress,
-                              L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                   } else {
                       // No room in the L1, so we need to make room in the L1
                       trigger(Event:L1_Replacement,
                               L1Icache.cacheProbe(in_msg.LineAddress),
                               getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
-                              TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+                              TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
                   }
               } else {
                   // Data prefetch
@@ -339,7 +339,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
                       // cache. We should drop this request.
                       trigger(prefetch_request_type_to_event(in_msg.Type),
                               in_msg.LineAddress,
-                              L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                   }
 
                   // Check to see if it is in the OTHER L1
@@ -349,7 +349,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
                       // request.
                       trigger(prefetch_request_type_to_event(in_msg.Type),
                               in_msg.LineAddress,
-                              L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                   }
 
                   if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
@@ -357,13 +357,13 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
                       // the L1 let's see if the L2 has it
                       trigger(prefetch_request_type_to_event(in_msg.Type),
                               in_msg.LineAddress,
-                              L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                   } else {
                       // No room in the L1, so we need to make room in the L1
                       trigger(Event:L1_Replacement,
                               L1Dcache.cacheProbe(in_msg.LineAddress),
                               getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
-                              TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+                              TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
                   }
               }
           }
@@ -377,7 +377,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
         assert(in_msg.Destination.isElement(machineID));
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
           trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -417,7 +417,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
         assert(in_msg.Destination.isElement(machineID));
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if (in_msg.Type == CoherenceRequestType:INV) {
           trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
@@ -450,7 +450,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
           if (is_valid(L1Icache_entry)) {
             // The tag matches for the L1, so the L1 asks the L2 for it.
             trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                    L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                    L1Icache_entry, TBEs[in_msg.LineAddress]);
           } else {
 
             // Check to see if it is in the OTHER L1
@@ -458,19 +458,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
             if (is_valid(L1Dcache_entry)) {
               // The block is in the wrong L1, put the request on the queue to the shared L2
               trigger(Event:L1_Replacement, in_msg.LineAddress,
-                      L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                      L1Dcache_entry, TBEs[in_msg.LineAddress]);
             }
 
             if (L1Icache.cacheAvail(in_msg.LineAddress)) {
               // L1 does't have the line, but we have space for it
               // in the L1 so let's see if the L2 has it.
               trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                      L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                      L1Icache_entry, TBEs[in_msg.LineAddress]);
             } else {
               // No room in the L1, so we need to make room in the L1
               trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
                       getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
-                      TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+                      TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
             }
           }
         } else {
@@ -480,7 +480,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
           if (is_valid(L1Dcache_entry)) {
             // The tag matches for the L1, so the L1 ask the L2 for it
             trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                    L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                    L1Dcache_entry, TBEs[in_msg.LineAddress]);
           } else {
 
             // Check to see if it is in the OTHER L1
@@ -488,19 +488,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
             if (is_valid(L1Icache_entry)) {
               // The block is in the wrong L1, put the request on the queue to the shared L2
               trigger(Event:L1_Replacement, in_msg.LineAddress,
-                      L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                      L1Icache_entry, TBEs[in_msg.LineAddress]);
             }
 
             if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
               // L1 does't have the line, but we have space for it
               // in the L1 let's see if the L2 has it.
               trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                      L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+                      L1Dcache_entry, TBEs[in_msg.LineAddress]);
             } else {
               // No room in the L1, so we need to make room in the L1
               trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
                       getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
-                      TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+                      TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
             }
           }
         }
@@ -809,47 +809,36 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
     sequencer.invalidateSC(address);
   }
 
-  action(h_load_hit, "hd",
-         desc="Notify sequencer the load completed.")
+  action(h_load_hit, "h",
+         desc="If not prefetch, notify sequencer the load completed.")
   {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Dcache.setMRU(cache_entry);
     sequencer.readCallback(address, cache_entry.DataBlk);
   }
 
-  action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
+  action(hx_load_hit, "hx",
+         desc="If not prefetch, notify sequencer the load completed.")
   {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Icache.setMRU(cache_entry);
-    sequencer.readCallback(address, cache_entry.DataBlk);
-  }
-
-  action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
-  {
-    assert(is_valid(cache_entry));
-    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Icache.setMRU(address);
-    L1Dcache.setMRU(address);
     sequencer.readCallback(address, cache_entry.DataBlk, true);
   }
 
-  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
+  action(hh_store_hit, "\h",
+         desc="If not prefetch, notify sequencer that store completed.")
   {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Dcache.setMRU(cache_entry);
     sequencer.writeCallback(address, cache_entry.DataBlk);
     cache_entry.Dirty := true;
   }
 
-  action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
+  action(hhx_store_hit, "\hx",
+         desc="If not prefetch, notify sequencer that store completed.")
   {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Icache.setMRU(address);
-    L1Dcache.setMRU(address);
     sequencer.writeCallback(address, cache_entry.DataBlk, true);
     cache_entry.Dirty := true;
   }
@@ -858,7 +847,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
     check_allocate(TBEs);
     assert(is_valid(cache_entry));
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
     tbe.isPrefetch := false;
     tbe.Dirty := cache_entry.Dirty;
     tbe.DataBlk := cache_entry.DataBlk;
@@ -1091,7 +1080,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   }
 
   transition({S,E,M}, Ifetch) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileInstHit;
     k_popMandatoryQueue;
   }
index 739a6f713a95347ce7a0ee6a514e4927916d193b..e4f719d9f993779a0deb4ade3b6a58e9e4f093d8 100644 (file)
@@ -157,7 +157,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
 
   // inclusive cache, returns L2 entries only
   Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
-    return static_cast(Entry, "pointer", L2cache.lookup(addr));
+    return static_cast(Entry, "pointer", L2cache[addr]);
   }
 
   bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
@@ -196,7 +196,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
       return L2Cache_State_to_permission(tbe.TBEState);
@@ -213,7 +213,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -224,7 +224,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
@@ -288,7 +288,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
     if(L1unblockNetwork_in.isReady()) {
       peek(L1unblockNetwork_in,  ResponseMsg) {
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
         DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
                 in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
                 in_msg.Sender, in_msg.Type, in_msg.Destination);
@@ -312,7 +312,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
         // test wether it's from a local L1 or an off chip source
         assert(in_msg.Destination.isElement(machineID));
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
           if(in_msg.Type == CoherenceResponseType:DATA) {
@@ -351,7 +351,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
     if(L1RequestL2Network_in.isReady()) {
       peek(L1RequestL2Network_in,  RequestMsg) {
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
                 in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
@@ -376,10 +376,10 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
             Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
             if (isDirty(L2cache_entry)) {
               trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
-                      L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+                      L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
             } else {
               trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
-                      L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+                      L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
             }
           }
         }
@@ -591,7 +591,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
     check_allocate(TBEs);
     assert(is_valid(cache_entry));
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
     tbe.L1_GetS_IDs.clear();
     tbe.DataBlk := cache_entry.DataBlk;
     tbe.Dirty := cache_entry.Dirty;
index 6c5c84f2f8e5be2e7d6d9fbdfbd70c41fac26be1..22aabee4ea27a21b905ca63db66f2c5ab3bf5d92 100644 (file)
@@ -101,7 +101,7 @@ machine(Directory, "MESI Two Level directory protocol")
   void wakeUpBuffers(Addr a);
 
   Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
-    Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
 
     if (is_valid(dir_entry)) {
       return dir_entry;
@@ -133,7 +133,7 @@ machine(Directory, "MESI Two Level directory protocol")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
       return Directory_State_to_permission(tbe.TBEState);
@@ -149,7 +149,7 @@ machine(Directory, "MESI Two Level directory protocol")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -160,7 +160,7 @@ machine(Directory, "MESI Two Level directory protocol")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
@@ -194,13 +194,13 @@ machine(Directory, "MESI Two Level directory protocol")
       peek(requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (isGETRequest(in_msg.Type)) {
-          trigger(Event:Fetch, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
           trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
-                  TBEs.lookup(makeLineAddress(in_msg.addr)));
+                  TBEs[makeLineAddress(in_msg.addr)]);
         } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
           trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
-                  TBEs.lookup(makeLineAddress(in_msg.addr)));
+                  TBEs[makeLineAddress(in_msg.addr)]);
         } else {
           DPRINTF(RubySlicc, "%s\n", in_msg);
           error("Invalid message");
@@ -214,9 +214,9 @@ machine(Directory, "MESI Two Level directory protocol")
       peek(responseNetwork_in, ResponseMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
-          trigger(Event:Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:ACK) {
-          trigger(Event:CleanReplacement, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
         } else {
           DPRINTF(RubySlicc, "%s\n", in_msg.Type);
           error("Invalid message");
@@ -230,9 +230,9 @@ machine(Directory, "MESI Two Level directory protocol")
     if (memQueue_in.isReady()) {
       peek(memQueue_in, MemoryMsg) {
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
-          trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
-          trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
         } else {
           DPRINTF(RubySlicc, "%s\n", in_msg.Type);
           error("Invalid message");
@@ -390,7 +390,7 @@ machine(Directory, "MESI Two Level directory protocol")
   action(v_allocateTBE, "v", desc="Allocate TBE") {
     peek(requestNetwork_in, RequestMsg) {
       TBEs.allocate(address);
-      set_tbe(TBEs.lookup(address));
+      set_tbe(TBEs[address]);
       tbe.DataBlk := in_msg.DataBlk;
       tbe.PhysicalAddress := in_msg.addr;
       tbe.Len := in_msg.Len;
index d247ce663ab20616ec16fb37bca2c25a47fab245..3380cd7e6476210aa8bd7cb33a9989a83db02f75 100644 (file)
@@ -152,7 +152,7 @@ machine(L1Cache, "MI Example L1 Cache")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       return L1Cache_State_to_permission(tbe.TBEState);
     }
@@ -172,7 +172,7 @@ machine(L1Cache, "MI Example L1 Cache")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -183,7 +183,7 @@ machine(L1Cache, "MI Example L1 Cache")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
@@ -205,7 +205,7 @@ machine(L1Cache, "MI Example L1 Cache")
       peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if (in_msg.Type == CoherenceRequestType:GETX) {
           trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
@@ -231,7 +231,7 @@ machine(L1Cache, "MI Example L1 Cache")
       peek(responseNetwork_in, ResponseMsg, block_on="addr") {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if (in_msg.Type == CoherenceResponseType:DATA) {
           trigger(Event:Data, in_msg.addr, cache_entry, tbe);
@@ -254,11 +254,11 @@ machine(L1Cache, "MI Example L1 Cache")
           // make room for the block
           trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
                   getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
-                  TBEs.lookup(cacheMemory.cacheProbe(in_msg.LineAddress)));
+                  TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
         }
         else {
           trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
-                  cache_entry, TBEs.lookup(in_msg.LineAddress));
+                  cache_entry, TBEs[in_msg.LineAddress]);
         }
       }
     }
@@ -353,7 +353,6 @@ machine(L1Cache, "MI Example L1 Cache")
   action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
-    cacheMemory.setMRU(cache_entry);
     sequencer.readCallback(address, cache_entry.DataBlk, false);
   }
 
@@ -361,7 +360,6 @@ machine(L1Cache, "MI Example L1 Cache")
     peek(responseNetwork_in, ResponseMsg) {
       assert(is_valid(cache_entry));
       DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
-      cacheMemory.setMRU(cache_entry);
       sequencer.readCallback(address, cache_entry.DataBlk, true,
                              machineIDToMachineType(in_msg.Sender));
     }
@@ -370,7 +368,6 @@ machine(L1Cache, "MI Example L1 Cache")
   action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
-    cacheMemory.setMRU(cache_entry);
     sequencer.writeCallback(address, cache_entry.DataBlk, false);
   }
 
@@ -378,7 +375,6 @@ machine(L1Cache, "MI Example L1 Cache")
     peek(responseNetwork_in, ResponseMsg) {
       assert(is_valid(cache_entry));
       DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
-      cacheMemory.setMRU(cache_entry);
       sequencer.writeCallback(address, cache_entry.DataBlk, true,
                               machineIDToMachineType(in_msg.Sender));
     }
@@ -400,7 +396,7 @@ machine(L1Cache, "MI Example L1 Cache")
 
   action(v_allocateTBE, "v", desc="Allocate TBE") {
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
   }
 
   action(w_deallocateTBE, "w", desc="Deallocate TBE") {
index c9f6b9be6578ff8cf1734051f7a0ce3b8c1ea152..a22691bda56b85a1c964fa4bfbe9283060869bf4 100644 (file)
@@ -111,7 +111,7 @@ machine(Directory, "Directory protocol")
   void unset_tbe();
 
   Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
-    Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
 
     if (is_valid(dir_entry)) {
       return dir_entry;
@@ -155,7 +155,7 @@ machine(Directory, "Directory protocol")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       return Directory_State_to_permission(tbe.TBEState);
     }
@@ -174,7 +174,7 @@ machine(Directory, "Directory protocol")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -185,7 +185,7 @@ machine(Directory, "Directory protocol")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
             testAndWrite(addr, tbe.DataBlk, pkt);
@@ -207,7 +207,7 @@ machine(Directory, "Directory protocol")
   in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
     if (dmaRequestQueue_in.isReady()) {
       peek(dmaRequestQueue_in, DMARequestMsg) {
-        TBE tbe := TBEs.lookup(in_msg.LineAddress);
+        TBE tbe := TBEs[in_msg.LineAddress];
         if (in_msg.Type == DMARequestType:READ) {
           trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
         } else if (in_msg.Type == DMARequestType:WRITE) {
@@ -222,7 +222,7 @@ machine(Directory, "Directory protocol")
   in_port(requestQueue_in, RequestMsg, requestToDir) {
     if (requestQueue_in.isReady()) {
       peek(requestQueue_in, RequestMsg) {
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == CoherenceRequestType:GETS) {
           trigger(Event:GETS, in_msg.addr, tbe);
         } else if (in_msg.Type == CoherenceRequestType:GETX) {
@@ -245,7 +245,7 @@ machine(Directory, "Directory protocol")
   in_port(memQueue_in, MemoryMsg, responseFromMemory) {
     if (memQueue_in.isReady()) {
       peek(memQueue_in, MemoryMsg) {
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
           trigger(Event:Memory_Data, in_msg.addr, tbe);
         } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
@@ -403,7 +403,7 @@ machine(Directory, "Directory protocol")
   action(v_allocateTBE, "v", desc="Allocate TBE") {
     peek(dmaRequestQueue_in, DMARequestMsg) {
       TBEs.allocate(address);
-      set_tbe(TBEs.lookup(address));
+      set_tbe(TBEs[address]);
       tbe.DataBlk := in_msg.DataBlk;
       tbe.PhysicalAddress := in_msg.PhysicalAddress;
       tbe.Len := in_msg.Len;
@@ -414,7 +414,7 @@ machine(Directory, "Directory protocol")
   action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
     peek(dmaRequestQueue_in, DMARequestMsg) {
       TBEs.allocate(address);
-      set_tbe(TBEs.lookup(address));
+      set_tbe(TBEs[address]);
       tbe.DmaRequestor := in_msg.Requestor;
     }
   }
@@ -422,7 +422,7 @@ machine(Directory, "Directory protocol")
   action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
     peek(requestQueue_in, RequestMsg) {
       TBEs.allocate(address);
-      set_tbe(TBEs.lookup(address));
+      set_tbe(TBEs[address]);
       tbe.DataBlk := in_msg.DataBlk;
     }
   }
index 7a8f35333c5d759c5cb1cccae19cf185e05b13a8..8a2eee1e29875562d446ab719a5970f58d7da070 100644 (file)
@@ -190,7 +190,7 @@ machine(L1Cache, "Directory protocol")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
       return L1Cache_State_to_permission(tbe.TBEState);
@@ -217,7 +217,7 @@ machine(L1Cache, "Directory protocol")
     if(is_valid(cache_entry)) {
       testAndRead(addr, cache_entry.DataBlk, pkt);
     } else {
-      TBE tbe := TBEs.lookup(addr);
+      TBE tbe := TBEs[addr];
       if(is_valid(tbe)) {
         testAndRead(addr, tbe.DataBlk, pkt);
       } else {
@@ -236,7 +236,7 @@ machine(L1Cache, "Directory protocol")
       return num_functional_writes;
     }
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
     return num_functional_writes;
@@ -269,7 +269,7 @@ machine(L1Cache, "Directory protocol")
     if (useTimerTable_in.isReady()) {
         trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
                 getCacheEntry(useTimerTable.readyAddress()),
-                TBEs.lookup(useTimerTable.readyAddress()));
+                TBEs[useTimerTable.readyAddress()]);
     }
   }
 
@@ -279,7 +279,7 @@ machine(L1Cache, "Directory protocol")
       peek(triggerQueue_in, TriggerMsg) {
         if (in_msg.Type == TriggerType:ALL_ACKS) {
           trigger(Event:All_acks, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else {
           error("Unexpected message");
         }
@@ -299,29 +299,29 @@ machine(L1Cache, "Directory protocol")
         if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
           if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
             trigger(Event:Own_GETX, in_msg.addr,
-                    getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
           } else {
             trigger(Event:Fwd_GETX, in_msg.addr,
-                    getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
           }
         } else if (in_msg.Type == CoherenceRequestType:GETS) {
           trigger(Event:Fwd_GETS, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
           trigger(Event:Fwd_DMA, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
           trigger(Event:Writeback_Ack, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
           trigger(Event:Writeback_Ack_Data, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
           trigger(Event:Writeback_Nack, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:INV) {
           trigger(Event:Inv, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else {
           error("Unexpected message");
         }
@@ -335,13 +335,13 @@ machine(L1Cache, "Directory protocol")
       peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
         if (in_msg.Type == CoherenceResponseType:ACK) {
           trigger(Event:Ack, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:DATA) {
           trigger(Event:Data, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
           trigger(Event:Exclusive_Data, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else {
           error("Unexpected message");
         }
@@ -365,7 +365,7 @@ machine(L1Cache, "Directory protocol")
             // The tag matches for the L1, so the L1 asks the L2 for it.
             trigger(mandatory_request_type_to_event(in_msg.Type),
                     in_msg.LineAddress, L1Icache_entry,
-                    TBEs.lookup(in_msg.LineAddress));
+                    TBEs[in_msg.LineAddress]);
           } else {
 
             Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
@@ -373,19 +373,19 @@ machine(L1Cache, "Directory protocol")
             if (is_valid(L1Dcache_entry)) {
               // The block is in the wrong L1, put the request on the queue to the shared L2
               trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
-                      TBEs.lookup(in_msg.LineAddress));
+                      TBEs[in_msg.LineAddress]);
             }
             if (L1Icache.cacheAvail(in_msg.LineAddress)) {
               // L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
               trigger(mandatory_request_type_to_event(in_msg.Type),
                       in_msg.LineAddress, L1Icache_entry,
-                      TBEs.lookup(in_msg.LineAddress));
+                      TBEs[in_msg.LineAddress]);
             } else {
               // No room in the L1, so we need to make room in the L1
               trigger(Event:L1_Replacement,
                       L1Icache.cacheProbe(in_msg.LineAddress),
                       getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
-                      TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+                      TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
             }
           }
         } else {
@@ -396,7 +396,7 @@ machine(L1Cache, "Directory protocol")
             // The tag matches for the L1, so the L1 ask the L2 for it
             trigger(mandatory_request_type_to_event(in_msg.Type),
                     in_msg.LineAddress, L1Dcache_entry,
-                    TBEs.lookup(in_msg.LineAddress));
+                    TBEs[in_msg.LineAddress]);
           } else {
 
             Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
@@ -404,19 +404,19 @@ machine(L1Cache, "Directory protocol")
             if (is_valid(L1Icache_entry)) {
               // The block is in the wrong L1, put the request on the queue to the shared L2
               trigger(Event:L1_Replacement, in_msg.LineAddress,
-                      L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+                      L1Icache_entry, TBEs[in_msg.LineAddress]);
             }
             if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
               // L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
               trigger(mandatory_request_type_to_event(in_msg.Type),
                       in_msg.LineAddress, L1Dcache_entry,
-                      TBEs.lookup(in_msg.LineAddress));
+                      TBEs[in_msg.LineAddress]);
             } else {
               // No room in the L1, so we need to make room in the L1
               trigger(Event:L1_Replacement,
                       L1Dcache.cacheProbe(in_msg.LineAddress),
                       getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
-                      TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+                      TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
             }
           }
         }
@@ -635,32 +635,21 @@ machine(L1Cache, "Directory protocol")
     }
   }
 
-  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Dcache.setMRU(cache_entry);
-    sequencer.readCallback(address, cache_entry.DataBlk);
-  }
-
-  action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
-    assert(is_valid(cache_entry));
-    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Icache.setMRU(cache_entry);
     sequencer.readCallback(address, cache_entry.DataBlk);
   }
 
   action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Icache.setMRU(address);
-    L1Dcache.setMRU(address);
     sequencer.readCallback(address, cache_entry.DataBlk, true);
   }
 
   action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Dcache.setMRU(cache_entry);
     sequencer.writeCallback(address, cache_entry.DataBlk);
     cache_entry.Dirty := true;
   }
@@ -668,8 +657,6 @@ machine(L1Cache, "Directory protocol")
   action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Icache.setMRU(address);
-    L1Dcache.setMRU(address);
     sequencer.writeCallback(address, cache_entry.DataBlk, true);
     cache_entry.Dirty := true;
   }
@@ -677,7 +664,7 @@ machine(L1Cache, "Directory protocol")
   action(i_allocateTBE, "i", desc="Allocate TBE") {
     check_allocate(TBEs);
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
     assert(is_valid(cache_entry));
     tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
     tbe.Dirty := cache_entry.Dirty;
@@ -977,7 +964,7 @@ machine(L1Cache, "Directory protocol")
   }
 
   transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileInstHit;
     k_popMandatoryQueue;
   }
index e1d665292eaf1a7f0fe91bc457b27c8cca6bfbed..38c6e9f9be1fc3315e2bfdebfd875f27bcd981ef 100644 (file)
@@ -232,7 +232,7 @@ machine(L2Cache, "Token protocol")
   void unset_tbe();
 
   Entry getCacheEntry(Addr address), return_by_pointer="yes" {
-    return static_cast(Entry, "pointer", L2cache.lookup(address));
+    return static_cast(Entry, "pointer", L2cache[address]);
   }
 
   bool isDirTagPresent(Addr addr) {
@@ -519,7 +519,7 @@ machine(L2Cache, "Token protocol")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
       return L2Cache_State_to_permission(tbe.TBEState);
@@ -542,7 +542,7 @@ machine(L2Cache, "Token protocol")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -553,7 +553,7 @@ machine(L2Cache, "Token protocol")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
@@ -582,7 +582,7 @@ machine(L2Cache, "Token protocol")
       peek(triggerQueue_in, TriggerMsg) {
         if (in_msg.Type == TriggerType:ALL_ACKS) {
           trigger(Event:All_Acks, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else {
           error("Unexpected message");
         }
@@ -598,26 +598,26 @@ machine(L2Cache, "Token protocol")
         if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
           if (in_msg.Requestor == machineID) {
             trigger(Event:Own_GETX, in_msg.addr,
-                    getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
           } else {
             trigger(Event:Fwd_GETX, in_msg.addr,
-                    getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
           }
         } else if (in_msg.Type == CoherenceRequestType:GETS) {
           trigger(Event:Fwd_GETS, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
           trigger(Event:Fwd_DMA, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:INV) {
           trigger(Event:Inv, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
           trigger(Event:Writeback_Ack, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
           trigger(Event:Writeback_Nack, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else {
           error("Unexpected message");
         }
@@ -631,25 +631,25 @@ machine(L2Cache, "Token protocol")
         assert(in_msg.Destination.isElement(machineID));
         if (in_msg.Type == CoherenceRequestType:GETX) {
           trigger(Event:L1_GETX, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:GETS) {
             trigger(Event:L1_GETS, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:PUTO) {
           trigger(Event:L1_PUTO, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:PUTX) {
           trigger(Event:L1_PUTX, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:PUTS) {
           Entry cache_entry := getCacheEntry(in_msg.addr);
           if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
             trigger(Event:L1_PUTS_only, in_msg.addr,
-                    cache_entry, TBEs.lookup(in_msg.addr));
+                    cache_entry, TBEs[in_msg.addr]);
           }
           else {
             trigger(Event:L1_PUTS, in_msg.addr,
-                    cache_entry, TBEs.lookup(in_msg.addr));
+                    cache_entry, TBEs[in_msg.addr]);
           }
         } else {
           error("Unexpected message");
@@ -667,35 +667,35 @@ machine(L2Cache, "Token protocol")
         if (in_msg.Type == CoherenceResponseType:ACK) {
           if (in_msg.SenderMachine == MachineType:L2Cache) {
             trigger(Event:ExtAck, in_msg.addr,
-                    getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
           }
           else {
             trigger(Event:IntAck, in_msg.addr,
-                    getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
           }
         } else if (in_msg.Type == CoherenceResponseType:DATA) {
           trigger(Event:Data, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
           trigger(Event:Data_Exclusive, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
           trigger(Event:Unblock, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
           trigger(Event:Exclusive_Unblock, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
           Entry cache_entry := getCacheEntry(in_msg.addr);
           if (is_invalid(cache_entry) &&
                    L2cache.cacheAvail(in_msg.addr) == false) {
             trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
                     getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
-                    TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+                    TBEs[L2cache.cacheProbe(in_msg.addr)]);
           }
           else {
             trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
-                    cache_entry, TBEs.lookup(in_msg.addr));
+                    cache_entry, TBEs[in_msg.addr]);
           }
         } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
           Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -703,15 +703,15 @@ machine(L2Cache, "Token protocol")
                    L2cache.cacheAvail(in_msg.addr) == false) {
             trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
                     getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
-                    TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+                    TBEs[L2cache.cacheProbe(in_msg.addr)]);
           }
           else {
             trigger(Event:L1_WBCLEANDATA, in_msg.addr,
-                    cache_entry, TBEs.lookup(in_msg.addr));
+                    cache_entry, TBEs[in_msg.addr]);
           }
         } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
           trigger(Event:DmaAck, in_msg.addr,
-                  getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
         } else {
           error("Unexpected message");
         }
@@ -1223,7 +1223,7 @@ machine(L2Cache, "Token protocol")
   action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
     check_allocate(TBEs);
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
     if(is_valid(cache_entry)) {
       tbe.DataBlk := cache_entry.DataBlk;
       tbe.Dirty := cache_entry.Dirty;
index ba58a6e9a98c2bf2a97ef6beb0cebff89e84d4e6..dcd37cc33ef17e8acb8d616a94a1cf7453932efc 100644 (file)
@@ -122,7 +122,7 @@ machine(Directory, "Directory protocol")
   void unset_tbe();
 
   Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
-    Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
 
     if (is_valid(dir_entry)) {
       return dir_entry;
@@ -234,26 +234,26 @@ machine(Directory, "Directory protocol")
         if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
           if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
             trigger(Event:Last_Unblock, in_msg.addr,
-                    TBEs.lookup(in_msg.addr));
+                    TBEs[in_msg.addr]);
           } else {
             trigger(Event:Unblock, in_msg.addr,
-                    TBEs.lookup(in_msg.addr));
+                    TBEs[in_msg.addr]);
           }
         } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
           trigger(Event:Exclusive_Unblock, in_msg.addr,
-                  TBEs.lookup(in_msg.addr));
+                  TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
           trigger(Event:Dirty_Writeback, in_msg.addr,
-                  TBEs.lookup(in_msg.addr));
+                  TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
           trigger(Event:Clean_Writeback, in_msg.addr,
-                  TBEs.lookup(in_msg.addr));
+                  TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
           trigger(Event:Data, in_msg.addr,
-                  TBEs.lookup(in_msg.addr));
+                  TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
           trigger(Event:DMA_ACK, in_msg.addr,
-                  TBEs.lookup(in_msg.addr));
+                  TBEs[in_msg.addr]);
         } else {
           error("Invalid message");
         }
@@ -265,21 +265,21 @@ machine(Directory, "Directory protocol")
     if (requestQueue_in.isReady()) {
       peek(requestQueue_in, RequestMsg) {
         if (in_msg.Type == CoherenceRequestType:GETS) {
-          trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:GETX) {
-          trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:PUTX) {
-          trigger(Event:PUTX, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:PUTO) {
-          trigger(Event:PUTO, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
-          trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
           trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
-                  TBEs.lookup(makeLineAddress(in_msg.addr)));
+                  TBEs[makeLineAddress(in_msg.addr)]);
         } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
           trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
-                  TBEs.lookup(makeLineAddress(in_msg.addr)));
+                  TBEs[makeLineAddress(in_msg.addr)]);
         } else {
           error("Invalid message");
         }
@@ -292,9 +292,9 @@ machine(Directory, "Directory protocol")
     if (memQueue_in.isReady()) {
       peek(memQueue_in, MemoryMsg) {
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
-          trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
-          trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
         } else {
           DPRINTF(RubySlicc, "%s\n", in_msg.Type);
           error("Invalid message");
@@ -540,7 +540,7 @@ machine(Directory, "Directory protocol")
   action(v_allocateTBE, "v", desc="Allocate TBE entry") {
     peek (requestQueue_in, RequestMsg) {
       TBEs.allocate(address);
-      set_tbe(TBEs.lookup(address));
+      set_tbe(TBEs[address]);
       tbe.PhysicalAddress := in_msg.addr;
       tbe.Len := in_msg.Len;
       tbe.DataBlk := in_msg.DataBlk;
index 75c621243fc41534b58c83b6cb3fcb467c0e4fbd..e9931f25baa97957e37f00120679ec5aef25d256 100644 (file)
@@ -108,10 +108,10 @@ machine(DMA, "DMA Controller")
       peek(dmaRequestQueue_in, SequencerMsg) {
         if (in_msg.Type == SequencerRequestType:LD ) {
           trigger(Event:ReadRequest, in_msg.LineAddress,
-                  TBEs.lookup(in_msg.LineAddress));
+                  TBEs[in_msg.LineAddress]);
         } else if (in_msg.Type == SequencerRequestType:ST) {
           trigger(Event:WriteRequest, in_msg.LineAddress,
-                  TBEs.lookup(in_msg.LineAddress));
+                  TBEs[in_msg.LineAddress]);
         } else {
           error("Invalid request type");
         }
@@ -124,14 +124,14 @@ machine(DMA, "DMA Controller")
       peek( dmaResponseQueue_in, ResponseMsg) {
         if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
           trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
-                  TBEs.lookup(makeLineAddress(in_msg.addr)));
+                  TBEs[makeLineAddress(in_msg.addr)]);
         } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
        in_msg.Type == CoherenceResponseType:DATA) {
           trigger(Event:Data, makeLineAddress(in_msg.addr),
-                  TBEs.lookup(makeLineAddress(in_msg.addr)));
+                  TBEs[makeLineAddress(in_msg.addr)]);
         } else if (in_msg.Type == CoherenceResponseType:ACK) {
           trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
-                  TBEs.lookup(makeLineAddress(in_msg.addr)));
+                  TBEs[makeLineAddress(in_msg.addr)]);
         } else {
           error("Invalid response type");
         }
@@ -144,7 +144,7 @@ machine(DMA, "DMA Controller")
     if (triggerQueue_in.isReady()) {
       peek(triggerQueue_in, TriggerMsg) {
         if (in_msg.Type == TriggerType:ALL_ACKS) {
-          trigger(Event:All_Acks, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
         } else {
           error("Unexpected message");
         }
@@ -240,7 +240,7 @@ machine(DMA, "DMA Controller")
 
   action(v_allocateTBE, "v", desc="Allocate TBE entry") {
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
   }
 
   action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
index 1d47f1c8a4e571e16f177e834d12e1137fdbbb88..af6e4c0d5a7050f84a934d94319001150ffe3f18 100644 (file)
@@ -366,7 +366,7 @@ machine(L1Cache, "Token protocol")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := L1_TBEs.lookup(addr);
+    TBE tbe := L1_TBEs[addr];
     if(is_valid(tbe)) {
       return L1Cache_State_to_permission(tbe.TBEState);
     }
@@ -459,7 +459,7 @@ machine(L1Cache, "Token protocol")
   // Use Timer
   in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
     if (useTimerTable_in.isReady()) {
-      TBE tbe := L1_TBEs.lookup(useTimerTable.readyAddress());
+      TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
 
       if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
           (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
@@ -487,7 +487,7 @@ machine(L1Cache, "Token protocol")
     if (reissueTimerTable_in.isReady()) {
       trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
               getCacheEntry(reissueTimerTable.readyAddress()),
-              L1_TBEs.lookup(reissueTimerTable.readyAddress()));
+              L1_TBEs[reissueTimerTable.readyAddress()]);
     }
   }
 
@@ -510,7 +510,7 @@ machine(L1Cache, "Token protocol")
 
         // React to the message based on the current state of the table
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := L1_TBEs.lookup(in_msg.addr);
+        TBE tbe := L1_TBEs[in_msg.addr];
 
         if (persistentTable.isLocked(in_msg.addr)) {
           if (persistentTable.findSmallest(in_msg.addr) == machineID) {
@@ -548,7 +548,7 @@ machine(L1Cache, "Token protocol")
         assert(in_msg.Destination.isElement(machineID));
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := L1_TBEs.lookup(in_msg.addr);
+        TBE tbe := L1_TBEs[in_msg.addr];
 
         // Mark TBE flag if response received off-chip.  Use this to update average latency estimate
         if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
@@ -559,7 +559,7 @@ machine(L1Cache, "Token protocol")
 
             // came from an off-chip L2 cache
             if (is_valid(tbe)) {
-               // L1_TBEs.lookup(in_msg.addr).ExternalResponse := true;
+               // L1_TBEs[in_msg.addr].ExternalResponse := true;
                // profile_offchipL2_response(in_msg.addr);
             }
           }
@@ -619,7 +619,7 @@ machine(L1Cache, "Token protocol")
         assert(in_msg.Destination.isElement(machineID));
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := L1_TBEs.lookup(in_msg.addr);
+        TBE tbe := L1_TBEs[in_msg.addr];
 
         if (in_msg.Type == CoherenceRequestType:GETX) {
           if (in_msg.isLocal) {
@@ -665,7 +665,7 @@ machine(L1Cache, "Token protocol")
       peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
 
-        TBE tbe := L1_TBEs.lookup(in_msg.LineAddress);
+        TBE tbe := L1_TBEs[in_msg.LineAddress];
 
         if (in_msg.Type == RubyRequestType:IFETCH) {
           // ** INSTRUCTION ACCESS ***
@@ -695,7 +695,7 @@ machine(L1Cache, "Token protocol")
               trigger(Event:L1_Replacement,
                       L1Icache.cacheProbe(in_msg.LineAddress),
                       getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
-                      L1_TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+                      L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
             }
           }
         } else {
@@ -726,7 +726,7 @@ machine(L1Cache, "Token protocol")
               trigger(Event:L1_Replacement,
                       L1Dcache.cacheProbe(in_msg.LineAddress),
                       getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
-                      L1_TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+                      L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
             }
           }
         }
@@ -1284,22 +1284,12 @@ machine(L1Cache, "Token protocol")
     }
   }
 
-  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
-    assert(is_valid(cache_entry));
-    DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
-            address, cache_entry.DataBlk);
 
-    L1Dcache.setMRU(cache_entry);
-    sequencer.readCallback(address, cache_entry.DataBlk, false,
-                           MachineType:L1Cache);
-  }
-
-  action(h_ifetch_hit, "hi", desc="Notify sequencer the load completed.") {
+  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
             address, cache_entry.DataBlk);
 
-    L1Icache.setMRU(cache_entry);
     sequencer.readCallback(address, cache_entry.DataBlk, false,
                            MachineType:L1Cache);
   }
@@ -1309,8 +1299,6 @@ machine(L1Cache, "Token protocol")
     DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
             address, cache_entry.DataBlk);
     peek(responseNetwork_in, ResponseMsg) {
-      L1Icache.setMRU(address);
-      L1Dcache.setMRU(address);
       sequencer.readCallback(address, cache_entry.DataBlk,
                              isExternalHit(address, in_msg.Sender),
                              machineIDToMachineType(in_msg.Sender));
@@ -1322,7 +1310,6 @@ machine(L1Cache, "Token protocol")
     DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
             address, cache_entry.DataBlk);
 
-    L1Dcache.setMRU(cache_entry);
     sequencer.writeCallback(address, cache_entry.DataBlk, false,
                             MachineType:L1Cache);
     cache_entry.Dirty := true;
@@ -1334,8 +1321,6 @@ machine(L1Cache, "Token protocol")
     DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
             address, cache_entry.DataBlk);
     peek(responseNetwork_in, ResponseMsg) {
-      L1Icache.setMRU(address);
-      L1Dcache.setMRU(address);
       sequencer.writeCallback(address, cache_entry.DataBlk,
                               isExternalHit(address, in_msg.Sender),
                               machineIDToMachineType(in_msg.Sender));
@@ -1347,7 +1332,7 @@ machine(L1Cache, "Token protocol")
   action(i_allocateTBE, "i", desc="Allocate TBE") {
     check_allocate(L1_TBEs);
     L1_TBEs.allocate(address);
-    set_tbe(L1_TBEs.lookup(address));
+    set_tbe(L1_TBEs[address]);
     tbe.IssueCount := 0;
     peek(mandatoryQueue_in, RubyRequest) {
       tbe.PC := in_msg.ProgramCounter;
@@ -1717,7 +1702,7 @@ machine(L1Cache, "Token protocol")
   }
 
   transition({S, SM, S_L, SM_L}, Ifetch) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileInstHit;
     k_popMandatoryQueue;
   }
@@ -1799,7 +1784,7 @@ machine(L1Cache, "Token protocol")
 
   // Transitions from Owned
   transition({O, OM}, Ifetch) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileInstHit;
     k_popMandatoryQueue;
   }
@@ -1889,7 +1874,7 @@ machine(L1Cache, "Token protocol")
 
   // Transitions from Modified
   transition({MM, MM_W}, Ifetch) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileInstHit;
     k_popMandatoryQueue;
   }
@@ -1964,7 +1949,7 @@ machine(L1Cache, "Token protocol")
 
   // Transitions from Dirty Exclusive
   transition({M, M_W}, Ifetch) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileInstHit;
     k_popMandatoryQueue;
   }
index fd6a62ef22e77fd0949d012ae79a620fd60aba09..fdef75181f25c4ae9b48dd0c76f881d2e61b2d96 100644 (file)
@@ -175,7 +175,7 @@ machine(Directory, "Token protocol")
   void unset_tbe();
 
   Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
-    Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
 
     if (is_valid(dir_entry)) {
       return dir_entry;
@@ -218,7 +218,7 @@ machine(Directory, "Token protocol")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       return Directory_State_to_permission(tbe.TBEState);
     }
@@ -245,7 +245,7 @@ machine(Directory, "Token protocol")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -256,7 +256,7 @@ machine(Directory, "Token protocol")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
             testAndWrite(addr, tbe.DataBlk, pkt);
@@ -280,9 +280,9 @@ machine(Directory, "Token protocol")
     if (memQueue_in.isReady()) {
       peek(memQueue_in, MemoryMsg) {
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
-          trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
-          trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
         } else {
           DPRINTF(RubySlicc, "%s\n", in_msg.Type);
           error("Invalid message");
@@ -295,7 +295,7 @@ machine(Directory, "Token protocol")
   in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
     if (reissueTimerTable_in.isReady()) {
       trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
-              TBEs.lookup(reissueTimerTable.readyAddress()));
+              TBEs[reissueTimerTable.readyAddress()]);
     }
   }
 
@@ -307,13 +307,13 @@ machine(Directory, "Token protocol")
           if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
               (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
             trigger(Event:Data_All_Tokens, in_msg.addr,
-                    TBEs.lookup(in_msg.addr));
+                    TBEs[in_msg.addr]);
           } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
             trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
-                    TBEs.lookup(in_msg.addr));
+                    TBEs[in_msg.addr]);
           } else if (in_msg.Type == CoherenceResponseType:ACK) {
             trigger(Event:Ack_All_Tokens, in_msg.addr,
-                    TBEs.lookup(in_msg.addr));
+                    TBEs[in_msg.addr]);
           } else {
             DPRINTF(RubySlicc, "%s\n", in_msg.Type);
             error("Invalid message");
@@ -321,14 +321,14 @@ machine(Directory, "Token protocol")
         } else {
           if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
             trigger(Event:Data_Owner, in_msg.addr,
-                    TBEs.lookup(in_msg.addr));
+                    TBEs[in_msg.addr]);
           } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                      (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
             trigger(Event:Tokens, in_msg.addr,
-                    TBEs.lookup(in_msg.addr));
+                    TBEs[in_msg.addr]);
           } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
             trigger(Event:Ack_Owner, in_msg.addr,
-                    TBEs.lookup(in_msg.addr));
+                    TBEs[in_msg.addr]);
           } else {
             DPRINTF(RubySlicc, "%s\n", in_msg.Type);
             error("Invalid message");
@@ -360,38 +360,38 @@ machine(Directory, "Token protocol")
             if (persistentTable.findSmallest(in_msg.addr) == machineID) {
               if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
                 trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
-                        TBEs.lookup(in_msg.addr));
+                        TBEs[in_msg.addr]);
               } else {
                 trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
-                        TBEs.lookup(in_msg.addr));
+                        TBEs[in_msg.addr]);
               }
             } else {
               // locked
-              trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+              trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
             }
           } else {
             // unlocked
-            trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+            trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
           }
         }
         else {
           if (persistentTable.findSmallest(in_msg.addr) == machineID) {
               if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
                 trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
-                        TBEs.lookup(in_msg.addr));
+                        TBEs[in_msg.addr]);
               } else {
                 trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
-                        TBEs.lookup(in_msg.addr));
+                        TBEs[in_msg.addr]);
               }
           } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
             // locked
-            trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+            trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
           } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
             // locked
-            trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+            trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
           } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
             // unlocked
-            trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+            trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
           } else {
             error("Invalid message");
           }
@@ -405,9 +405,9 @@ machine(Directory, "Token protocol")
       peek(requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (in_msg.Type == CoherenceRequestType:GETS) {
-          trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
         } else if (in_msg.Type == CoherenceRequestType:GETX) {
-          trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
+          trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
         } else {
           error("Invalid message");
         }
@@ -419,14 +419,14 @@ machine(Directory, "Token protocol")
     if (dmaRequestQueue_in.isReady()) {
       peek(dmaRequestQueue_in, DMARequestMsg) {
         if (in_msg.Type == DMARequestType:READ) {
-          trigger(Event:DMA_READ, in_msg.LineAddress, TBEs.lookup(in_msg.LineAddress));
+          trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
         } else if (in_msg.Type == DMARequestType:WRITE) {
           if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
             trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
-                    TBEs.lookup(in_msg.LineAddress));
+                    TBEs[in_msg.LineAddress]);
           } else {
             trigger(Event:DMA_WRITE, in_msg.LineAddress,
-                    TBEs.lookup(in_msg.LineAddress));
+                    TBEs[in_msg.LineAddress]);
           }
         } else {
           error("Invalid message");
@@ -691,7 +691,7 @@ machine(Directory, "Token protocol")
   action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
     peek(dmaRequestQueue_in, DMARequestMsg) {
       TBEs.allocate(address);
-      set_tbe(TBEs.lookup(address));
+      set_tbe(TBEs[address]);
       tbe.DataBlk := in_msg.DataBlk;
       tbe.PhysicalAddress := in_msg.PhysicalAddress;
       tbe.Len := in_msg.Len;
index 269e47dfda7c6bc10f5df47af2ef0be45f70a5e7..d5539e02139e5ed87a2974fae2ecb36d86c6c384 100644 (file)
@@ -210,7 +210,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
     if(is_valid(cache_entry)) {
       testAndRead(addr, cache_entry.DataBlk, pkt);
     } else {
-      TBE tbe := TBEs.lookup(addr);
+      TBE tbe := TBEs[addr];
       if(is_valid(tbe)) {
         testAndRead(addr, tbe.DataBlk, pkt);
       } else {
@@ -229,7 +229,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
       return num_functional_writes;
     }
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     num_functional_writes := num_functional_writes +
       testAndWrite(addr, tbe.DataBlk, pkt);
     return num_functional_writes;
@@ -274,7 +274,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       return L1Cache_State_to_permission(tbe.TBEState);
     }
@@ -337,7 +337,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
       peek(triggerQueue_in, TriggerMsg) {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if (in_msg.Type == TriggerType:L2_to_L1) {
           trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
@@ -360,7 +360,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
       peek(responseToCache_in, ResponseMsg, block_on="addr") {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if (in_msg.Type == CoherenceResponseType:ACK) {
           trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
@@ -385,7 +385,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
       peek(forwardToCache_in, RequestMsg, block_on="addr") {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
 
         if ((in_msg.Type == CoherenceRequestType:GETX) ||
             (in_msg.Type == CoherenceRequestType:GETF)) {
@@ -429,7 +429,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
       peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
-        TBE tbe := TBEs.lookup(in_msg.LineAddress);
+        TBE tbe := TBEs[in_msg.LineAddress];
 
         if (in_msg.Type == RubyRequestType:IFETCH) {
           // ** INSTRUCTION ACCESS ***
@@ -452,7 +452,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
                 trigger(Event:L2_Replacement,
                         l2_victim_addr,
                         getL2CacheEntry(l2_victim_addr),
-                        TBEs.lookup(l2_victim_addr));
+                        TBEs[l2_victim_addr]);
               }
             }
 
@@ -477,14 +477,14 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
                 trigger(Event:L1_to_L2,
                         l1i_victim_addr,
                         getL1ICacheEntry(l1i_victim_addr),
-                        TBEs.lookup(l1i_victim_addr));
+                        TBEs[l1i_victim_addr]);
               } else {
                 Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
                 // The L2 does not have room, so we replace a line from the L2
                 trigger(Event:L2_Replacement,
                         l2_victim_addr,
                         getL2CacheEntry(l2_victim_addr),
-                        TBEs.lookup(l2_victim_addr));
+                        TBEs[l2_victim_addr]);
               }
             }
           }
@@ -510,7 +510,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
                 trigger(Event:L2_Replacement,
                         l2_victim_addr,
                         getL2CacheEntry(l2_victim_addr),
-                        TBEs.lookup(l2_victim_addr));
+                        TBEs[l2_victim_addr]);
               }
             }
 
@@ -534,14 +534,14 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
                 trigger(Event:L1_to_L2,
                         l1d_victim_addr,
                         getL1DCacheEntry(l1d_victim_addr),
-                        TBEs.lookup(l1d_victim_addr));
+                        TBEs[l1d_victim_addr]);
               } else {
                 Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
                 // The L2 does not have room, so we replace a line from the L2
                 trigger(Event:L2_Replacement,
                         l2_victim_addr,
                         getL2CacheEntry(l2_victim_addr),
-                        TBEs.lookup(l2_victim_addr));
+                        TBEs[l2_victim_addr]);
               }
             }
           }
@@ -857,18 +857,9 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
     }
   }
 
-  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Dcache.setMRU(cache_entry);
-    sequencer.readCallback(address, cache_entry.DataBlk, false,
-                           testAndClearLocalHit(cache_entry));
-  }
-
-  action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
-    assert(is_valid(cache_entry));
-    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Icache.setMRU(cache_entry);
     sequencer.readCallback(address, cache_entry.DataBlk, false,
                            testAndClearLocalHit(cache_entry));
   }
@@ -878,8 +869,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
     assert(is_valid(tbe));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
     peek(responseToCache_in, ResponseMsg) {
-      L1Icache.setMRU(address);
-      L1Dcache.setMRU(address);
+
       sequencer.readCallback(address, cache_entry.DataBlk, true,
                  machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
                  tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -890,7 +880,6 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
     peek(mandatoryQueue_in, RubyRequest) {
-      L1Dcache.setMRU(cache_entry);
       sequencer.writeCallback(address, cache_entry.DataBlk, false,
                               testAndClearLocalHit(cache_entry));
 
@@ -912,8 +901,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
     assert(is_valid(tbe));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
     peek(responseToCache_in, ResponseMsg) {
-      L1Icache.setMRU(address);
-      L1Dcache.setMRU(address);
+
       sequencer.writeCallback(address, cache_entry.DataBlk, true,
               machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
               tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -926,8 +914,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
     assert(is_valid(cache_entry));
     assert(is_valid(tbe));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    L1Icache.setMRU(address);
-    L1Dcache.setMRU(address);
+
     sequencer.writeCallback(address, cache_entry.DataBlk, true,
             machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
             tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -939,7 +926,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
     check_allocate(TBEs);
     assert(is_valid(cache_entry));
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
     tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
     tbe.Dirty := cache_entry.Dirty;
     tbe.Sharers := false;
@@ -948,7 +935,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   action(it_allocateTBE, "it", desc="Allocate TBE") {
     check_allocate(TBEs);
     TBEs.allocate(address);
-    set_tbe(TBEs.lookup(address));
+    set_tbe(TBEs[address]);
     tbe.Dirty := false;
     tbe.Sharers := false;
   }
@@ -1521,7 +1508,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   transition({S, SM, ISM}, Ifetch) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileL1InstHit;
     k_popMandatoryQueue;
   }
@@ -1535,7 +1522,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   transition(SR, Ifetch, S) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileL1InstMiss;
     uu_profileL2Hit;
     k_popMandatoryQueue;
@@ -1583,7 +1570,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileL1InstHit;
     k_popMandatoryQueue;
   }
@@ -1597,7 +1584,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   transition(OR, Ifetch, O) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileL1InstMiss;
     uu_profileL2Hit;
     k_popMandatoryQueue;
@@ -1648,7 +1635,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
 
   // Transitions from Modified
   transition({MM, M}, {Ifetch}) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileL1InstHit;
     k_popMandatoryQueue;
   }
@@ -1674,7 +1661,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   transition(MMR, Ifetch, MM) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileL1InstMiss;
     uu_profileL2Hit;
     k_popMandatoryQueue;
@@ -1755,7 +1742,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   transition(MR, Ifetch, M) {
-    h_ifetch_hit;
+    h_load_hit;
     uu_profileL1InstMiss;
     uu_profileL2Hit;
     k_popMandatoryQueue;
index b78d40510c5cb5a38e5f2ce1f7e6ace635a9c92c..27794a3bda8b1cbf42e566b3d07a8cf677ad3383 100644 (file)
@@ -195,7 +195,7 @@ machine(Directory, "AMD Hammer-like protocol")
   TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
 
   Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
-    Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
 
     if (is_valid(dir_entry)) {
       return dir_entry;
@@ -250,7 +250,7 @@ machine(Directory, "AMD Hammer-like protocol")
   }
 
   AccessPermission getAccessPermission(Addr addr) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       return Directory_State_to_permission(tbe.TBEState);
     }
@@ -267,7 +267,7 @@ machine(Directory, "AMD Hammer-like protocol")
   }
 
   void functionalRead(Addr addr, Packet *pkt) {
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       testAndRead(addr, tbe.DataBlk, pkt);
     } else {
@@ -278,7 +278,7 @@ machine(Directory, "AMD Hammer-like protocol")
   int functionalWrite(Addr addr, Packet *pkt) {
     int num_functional_writes := 0;
 
-    TBE tbe := TBEs.lookup(addr);
+    TBE tbe := TBEs[addr];
     if(is_valid(tbe)) {
       num_functional_writes := num_functional_writes +
         testAndWrite(addr, tbe.DataBlk, pkt);
@@ -317,7 +317,7 @@ machine(Directory, "AMD Hammer-like protocol")
     if (triggerQueue_in.isReady()) {
       peek(triggerQueue_in, TriggerMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == TriggerType:ALL_ACKS) {
           trigger(Event:All_acks_and_owner_data, in_msg.addr,
                   pf_entry, tbe);
@@ -341,7 +341,7 @@ machine(Directory, "AMD Hammer-like protocol")
     if (unblockNetwork_in.isReady()) {
       peek(unblockNetwork_in, ResponseMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
           trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
         } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
@@ -370,7 +370,7 @@ machine(Directory, "AMD Hammer-like protocol")
     if (responseToDir_in.isReady()) {
       peek(responseToDir_in, ResponseMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == CoherenceResponseType:ACK) {
           trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
         } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
@@ -393,7 +393,7 @@ machine(Directory, "AMD Hammer-like protocol")
     if (memQueue_in.isReady()) {
       peek(memQueue_in, MemoryMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
           trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
         } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
@@ -410,7 +410,7 @@ machine(Directory, "AMD Hammer-like protocol")
     if (requestQueue_in.isReady()) {
       peek(requestQueue_in, RequestMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
-        TBE tbe := TBEs.lookup(in_msg.addr);
+        TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == CoherenceRequestType:PUT) {
           trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
         } else if (in_msg.Type == CoherenceRequestType:PUTF) {
@@ -428,7 +428,7 @@ machine(Directory, "AMD Hammer-like protocol")
                 trigger(Event:Pf_Replacement,
                         probeFilter.cacheProbe(in_msg.addr),
                         getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
-                        TBEs.lookup(probeFilter.cacheProbe(in_msg.addr)));
+                        TBEs[probeFilter.cacheProbe(in_msg.addr)]);
               }
             }
           } else {
@@ -444,7 +444,7 @@ machine(Directory, "AMD Hammer-like protocol")
     if (dmaRequestQueue_in.isReady()) {
       peek(dmaRequestQueue_in, DMARequestMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
-        TBE tbe := TBEs.lookup(in_msg.LineAddress);
+        TBE tbe := TBEs[in_msg.LineAddress];
         if (in_msg.Type == DMARequestType:READ) {
           trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
         } else if (in_msg.Type == DMARequestType:WRITE) {
@@ -567,7 +567,7 @@ machine(Directory, "AMD Hammer-like protocol")
     check_allocate(TBEs);
     peek(requestQueue_in, RequestMsg) {
       TBEs.allocate(address);
-      set_tbe(TBEs.lookup(address));
+      set_tbe(TBEs[address]);
       tbe.PhysicalAddress := address;
       tbe.ResponseType := CoherenceResponseType:NULL;
     }
@@ -577,7 +577,7 @@ machine(Directory, "AMD Hammer-like protocol")
     check_allocate(TBEs);
     peek(dmaRequestQueue_in, DMARequestMsg) {
       TBEs.allocate(address);
-      set_tbe(TBEs.lookup(address));
+      set_tbe(TBEs[address]);
       tbe.DmaDataBlk := in_msg.DataBlk;
       tbe.PhysicalAddress := in_msg.PhysicalAddress;
       tbe.Len := in_msg.Len;
index 63f4b90ea996c4c759c8149409b8310720a7765c..d032adfd8968e05ca9a91113b20e82410d32f0eb 100644 (file)
@@ -50,10 +50,7 @@ structure(InPort, external = "yes", primitive="yes") {
 }
 
 external_type(NodeID, default="0", primitive="yes");
-structure (MachineID, external = "yes", non_obj="yes") {
-   MachineType getType();
-   NodeID getNum();
-}
+external_type(MachineID);
 
 structure (Set, external = "yes", non_obj="yes") {
   void setSize(int);
@@ -159,7 +156,6 @@ structure (CacheMemory, external = "yes") {
   Cycles getTagLatency();
   Cycles getDataLatency();
   void setMRU(Addr);
-  void setMRU(AbstractCacheEntry);
   void recordRequestType(CacheRequestType, Addr);
   bool checkResourceAvailable(CacheResourceType, Addr);
 
index 129a88e25481164dba388df982c066191d176fca..ac08fac82517408cbc8119b6e860215492e6ba27 100644 (file)
@@ -67,8 +67,6 @@ class DataBlock
   private:
     void alloc();
     uint8_t *m_data;
-    //! true if this DataBlock is responsible for deleting m_data,
-    //! false otherwise.
     bool m_alloc;
 };
 
index 31de160cf162e5fdf09111658b3ae4138a4aebbd..e377bc253511caa69b379eae6042084728e96b0c 100644 (file)
@@ -84,7 +84,7 @@ Histogram::doubleBinSize()
 }
 
 void
-Histogram::add(int64_t value)
+Histogram::add(int64 value)
 {
     assert(value >= 0);
     m_max = max(m_max, value);
index f02c4bedd146d49c63d4c46a14af18521e0dc35f..c34e39af1a8d9212a6e0801d230b92038a6dbe77 100644 (file)
@@ -40,7 +40,7 @@ class Histogram
     Histogram(int binsize = 1, uint32_t bins = 50);
     ~Histogram();
 
-    void add(int64_t value);
+    void add(int64 value);
     void add(Histogram& hist);
     void doubleBinSize();
 
@@ -51,10 +51,10 @@ class Histogram
     uint64_t size() const { return m_count; }
     uint32_t getBins() const { return m_data.size(); }
     int getBinSize() const { return m_binsize; }
-    int64_t getTotal() const { return m_sumSamples; }
+    int64 getTotal() const { return m_sumSamples; }
     uint64_t getSquaredTotal() const { return m_sumSquaredSamples; }
     uint64_t getData(int index) const { return m_data[index]; }
-    int64_t getMax() const { return m_max; }
+    int64 getMax() const { return m_max; }
 
     void printWithMultiplier(std::ostream& out, double multiplier) const;
     void printPercent(std::ostream& out) const;
@@ -62,12 +62,12 @@ class Histogram
 
 private:
     std::vector<uint64_t> m_data;
-    int64_t m_max;          // the maximum value seen so far
+    int64 m_max;          // the maximum value seen so far
     uint64_t m_count;                // the number of elements added
     int m_binsize;                // the size of each bucket
     uint32_t m_largest_bin;      // the largest bin used
 
-    int64_t m_sumSamples;   // the sum of all samples
+    int64 m_sumSamples;   // the sum of all samples
     uint64_t m_sumSquaredSamples; // the sum of the square of all samples
 
     double getStandardDeviation() const;
index f1839df726b7edd6c5230d2df0e4a6e5cbc0ba0c..5175cb9500a59658e249c57522505a08ca2eb266 100644 (file)
@@ -41,7 +41,7 @@ SubBlock::SubBlock(Addr addr, int size)
 }
 
 void
-SubBlock::mergeFrom(const DataBlock& data)
+SubBlock::internalMergeFrom(const DataBlock& data)
 {
     int size = getSize();
     assert(size > 0);
@@ -52,7 +52,7 @@ SubBlock::mergeFrom(const DataBlock& data)
 }
 
 void
-SubBlock::mergeTo(DataBlock& data) const
+SubBlock::internalMergeTo(DataBlock& data) const
 {
     int size = getSize();
     assert(size > 0);
@@ -68,3 +68,6 @@ SubBlock::print(std::ostream& out) const
 {
     out << "[" << m_address << ", " << getSize() << ", " << m_data << "]";
 }
+
+
+
index f336328faba931a121257f3b125153037476c3f4..ad1d68ae15afaf0196944b2dd82a5a615550d098 100644 (file)
@@ -56,12 +56,15 @@ class SubBlock
 
     // Merging to and from DataBlocks - We only need to worry about
     // updates when we are using DataBlocks
-    void mergeTo(DataBlock& data) const;
-    void mergeFrom(const DataBlock& data);
+    void mergeTo(DataBlock& data) const { internalMergeTo(data); }
+    void mergeFrom(const DataBlock& data) { internalMergeFrom(data); }
 
     void print(std::ostream& out) const;
 
   private:
+    void internalMergeTo(DataBlock& data) const;
+    void internalMergeFrom(const DataBlock& data);
+
     // Data Members (m_ prefix)
     Addr m_address;
     std::vector<uint8_t> m_data;
index f29efe8b5db4c15be9d2bf9a6a3941bc53d7424b..203b63779f347b2f91bb03e55d905e8f515d1b97 100644 (file)
@@ -30,6 +30,9 @@
 #ifndef TYPEDEFINES_H
 #define TYPEDEFINES_H
 
+typedef unsigned long long uint64;
+typedef long long int64;
+
 typedef unsigned int LinkID;
 typedef unsigned int NodeID;
 typedef unsigned int SwitchID;
index b0d27778208248941449739bdc4808898a9ab5f2..21b9152bed74940ee2a1e24b49fa28e7a11ff991 100644 (file)
@@ -507,8 +507,8 @@ H3BloomFilter::print(ostream& out) const
 int
 H3BloomFilter::get_index(Addr addr, int i)
 {
-    uint64_t x = makeLineAddress(addr);
-    // uint64_t y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
+    uint64 x = makeLineAddress(addr);
+    // uint64 y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
     int y = hash_H3(x,i);
 
     if (isParallel) {
@@ -519,10 +519,10 @@ H3BloomFilter::get_index(Addr addr, int i)
 }
 
 int
-H3BloomFilter::hash_H3(uint64_t value, int index)
+H3BloomFilter::hash_H3(uint64 value, int index)
 {
-    uint64_t mask = 1;
-    uint64_t val = value;
+    uint64 mask = 1;
+    uint64 val = value;
     int result = 0;
 
     for (int i = 0; i < 64; i++) {
index b6628f5e1668169a5979e506a96595f7cb4fa27e..8596d6acb04b6ed83554bd079f0f38ecfb405ce1 100644 (file)
@@ -68,7 +68,7 @@ class H3BloomFilter : public AbstractBloomFilter
   private:
     int get_index(Addr addr, int hashNumber);
 
-    int hash_H3(uint64_t value, int index);
+    int hash_H3(uint64 value, int index);
 
     std::vector<int> m_filter;
     int m_filter_size;
index f326030e9033c17f7bc47bdfcc1aa90be82acf6b..3cdca7e3bc57266e3bf8cf332a9d3e338fdc2454 100644 (file)
@@ -171,7 +171,7 @@ MultiBitSelBloomFilter::get_index(Addr addr, int i)
     // m_skip_bits is used to perform BitSelect after skipping some
     // bits. Used to simulate BitSel hashing on larger than cache-line
     // granularities
-    uint64_t x = (makeLineAddress(addr) >> m_skip_bits);
+    uint64 x = (makeLineAddress(addr) >> m_skip_bits);
     int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
     //36-bit addresses, 6-bit cache lines
 
@@ -183,10 +183,10 @@ MultiBitSelBloomFilter::get_index(Addr addr, int i)
 }
 
 int
-MultiBitSelBloomFilter::hash_bitsel(uint64_t value, int index, int jump,
+MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump,
                                     int maxBits, int numBits)
 {
-    uint64_t mask = 1;
+    uint64 mask = 1;
     int result = 0;
     int bit, i;
 
index b4fac06710901b48e3462b29c2e1cb38d6c12d06..e43dcd6f1eb2bc0e5d0537ff83492cfc1371c923 100644 (file)
@@ -68,7 +68,7 @@ class MultiBitSelBloomFilter : public AbstractBloomFilter
   private:
     int get_index(Addr addr, int hashNumber);
 
-    int hash_bitsel(uint64_t value, int index, int jump, int maxBits,
+    int hash_bitsel(uint64 value, int index, int jump, int maxBits,
                     int numBits);
 
     std::vector<int> m_filter;
index e9c5750285f9a80215ee3181de6d6b73a2bfa904..a72d8509e0d308b86d5dbc91e54d1e648cabc571 100644 (file)
@@ -362,6 +362,32 @@ MessageBuffer::isReady() const
         (m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
 }
 
+bool
+MessageBuffer::functionalRead(Packet *pkt)
+{
+    // Check the priority heap and read any messages that may
+    // correspond to the address in the packet.
+    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
+        Message *msg = m_prio_heap[i].get();
+        if (msg->functionalRead(pkt)) return true;
+    }
+
+    // Read the messages in the stall queue that correspond
+    // to the address in the packet.
+    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
+         map_iter != m_stall_msg_map.end();
+         ++map_iter) {
+
+        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
+            it != (map_iter->second).end(); ++it) {
+
+            Message *msg = (*it).get();
+            if (msg->functionalRead(pkt)) return true;
+        }
+    }
+    return false;
+}
+
 uint32_t
 MessageBuffer::functionalWrite(Packet *pkt)
 {
index 2625acabd029884146e68e629e38ca18a6c010d4..732b7ec6c417e353f558185922eaf5d75df0c6ad 100644 (file)
@@ -136,6 +136,11 @@ class MessageBuffer : public SimObject
     void setIncomingLink(int link_id) { m_input_link_id = link_id; }
     void setVnet(int net) { m_vnet_id = net; }
 
+    // Function for figuring out if any of the messages in the buffer can
+    // satisfy the read request for the address in the packet.
+    // Return value, if true, indicates that the request was fulfilled.
+    bool functionalRead(Packet *pkt);
+
     // Function for figuring out if any of the messages in the buffer need
     // to be updated with the data from the packet.
     // Return value indicates the number of messages that were updated.
@@ -179,7 +184,7 @@ class MessageBuffer : public SimObject
 
     int m_not_avail_count;  // count the # of times I didn't have N
                             // slots available
-    uint64_t m_msg_counter;
+    uint64 m_msg_counter;
     int m_priority_rank;
     const bool m_strict_fifo;
     const bool m_randomization;
index f72cea5a80b13de0cb1fd51dcf8d89b08f4bdbb5..d834ea1a355d80f3fa7cdee3604ed900ede2e9ef 100644 (file)
@@ -281,7 +281,7 @@ NetworkInterface::wakeup()
 
         int vnet = t_flit->get_vnet();
         m_net_ptr->increment_received_flits(vnet);
-        Cycles network_delay = curCycle() - t_flit->get_creation_time();
+        Cycles network_delay = curCycle() - t_flit->get_enqueue_time();
         Cycles queueing_delay = t_flit->get_delay();
 
         m_net_ptr->increment_network_latency(network_delay, vnet);
index aaf19b3b5c662798bb298e298276076c2ecef5df..7cf68560fbedc10b2db2319def3f11331ed91d92 100644 (file)
 #include "mem/ruby/network/garnet/flexible-pipeline/flit.hh"
 
 flit::flit(int id, int  vc, int vnet, int size, MsgPtr msg_ptr, Cycles curTime)
-    : m_id(id), m_vnet(vnet), m_vc(vc), m_size(size), m_creation_time(curTime)
 {
+    m_size = size;
     m_msg_ptr = msg_ptr;
+    m_enqueue_time = curTime;
     m_time = curTime;
+    m_id = id;
+    m_vnet = vnet;
+    m_vc = vc;
 
     if (size == 1) {
         m_type = HEAD_TAIL_;
@@ -48,6 +52,78 @@ flit::flit(int id, int  vc, int vnet, int size, MsgPtr msg_ptr, Cycles curTime)
         m_type = BODY_;
 }
 
+int
+flit::get_size()
+{
+    return m_size;
+}
+
+int
+flit::get_id()
+{
+    return m_id;
+}
+
+Cycles
+flit::get_time()
+{
+    return m_time;
+}
+
+Cycles
+flit::get_enqueue_time()
+{
+    return m_enqueue_time;
+}
+
+void
+flit::set_time(Cycles time)
+{
+    m_time = time;
+}
+
+int
+flit::get_vnet()
+{
+    return m_vnet;
+}
+
+int
+flit::get_vc()
+{
+    return m_vc;
+}
+
+void
+flit::set_vc(int vc)
+{
+    m_vc = vc;
+}
+
+MsgPtr&
+flit::get_msg_ptr()
+{
+    return m_msg_ptr;
+}
+
+flit_type
+flit::get_type()
+{
+    return m_type;
+}
+
+void
+flit::set_delay(Cycles delay)
+{
+    src_delay = delay;
+}
+
+Cycles
+flit::get_delay()
+{
+    return src_delay;
+}
+
 void
 flit::print(std::ostream& out) const
 {
@@ -56,7 +132,7 @@ flit::print(std::ostream& out) const
     out << "Type=" << m_type << " ";
     out << "Vnet=" << m_vnet << " ";
     out << "VC=" << m_vc << " ";
-    out << "Creation Time=" << m_creation_time << " ";
+    out << "Enqueue Time=" << m_enqueue_time << " ";
     out << "]";
 }
 
index 4049a92126d70fa625346ffa249137893dff796c..ff4afbc0867a1eaea6c36847770e65e4cdc41b95 100644 (file)
@@ -43,18 +43,18 @@ class flit
   public:
     flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr, Cycles curTime);
 
-    int get_size() const { return m_size; }
-    int get_id() const { return m_id; }
-    Cycles get_time() const { return m_time; }
-    Cycles get_creation_time() const { return m_creation_time; }
-    void set_time(Cycles time) { m_time = time; }
-    int get_vnet() const { return m_vnet; }
-    int get_vc() const { return m_vc; }
-    void set_vc(int vc) { m_vc = vc; }
-    MsgPtr& get_msg_ptr() { return m_msg_ptr; }
-    flit_type get_type() const { return m_type; }
-    void set_delay(Cycles delay) { src_delay = delay; }
-    Cycles get_delay() const { return src_delay; }
+    int get_size();
+    int get_id();
+    Cycles get_time();
+    Cycles get_enqueue_time();
+    void set_time(Cycles time);
+    int get_vnet();
+    int get_vc();
+    void set_vc(int vc);
+    MsgPtr& get_msg_ptr();
+    flit_type get_type();
+    void set_delay(Cycles delay);
+    Cycles get_delay();
     void print(std::ostream& out) const;
 
     static bool
@@ -71,12 +71,11 @@ class flit
     bool functionalWrite(Packet *pkt);
 
   private:
-    const int m_id;
-    const int m_vnet;
+    int m_id;
+    int m_vnet;
     int m_vc;
-    const int m_size;
-    const Cycles m_creation_time;
-    Cycles m_time;
+    int m_size;
+    Cycles m_enqueue_time, m_time;
     flit_type m_type;
     MsgPtr m_msg_ptr;
     Cycles src_delay;
index 697357ccb86c7ee2730885b82316f253b37c02b8..de038d211ec3ca620a0f88c18a525670ad5b1677 100644 (file)
@@ -49,8 +49,9 @@ operator<(const LinkOrder& l1, const LinkOrder& l2)
 }
 
 PerfectSwitch::PerfectSwitch(SwitchID sid, Switch *sw, uint32_t virt_nets)
-    : Consumer(sw), m_switch_id(sid), m_switch(sw)
+    : Consumer(sw)
 {
+    m_switch_id = sid;
     m_round_robin_start = 0;
     m_wakeups_wo_switch = 0;
     m_virtual_networks = virt_nets;
@@ -103,6 +104,9 @@ PerfectSwitch::~PerfectSwitch()
 void
 PerfectSwitch::operateVnet(int vnet)
 {
+    MsgPtr msg_ptr;
+    Message *net_msg_ptr = NULL;
+
     // This is for round-robin scheduling
     int incoming = m_round_robin_start;
     m_round_robin_start++;
@@ -119,6 +123,10 @@ PerfectSwitch::operateVnet(int vnet)
                 incoming = 0;
             }
 
+            // temporary vectors to store the routing results
+            vector<LinkID> output_links;
+            vector<NetDest> output_link_destinations;
+
             // Is there a message waiting?
             if (m_in[incoming].size() <= vnet) {
                 continue;
@@ -129,151 +137,138 @@ PerfectSwitch::operateVnet(int vnet)
                 continue;
             }
 
-            operateMessageBuffer(buffer, incoming, vnet);
-        }
-    }
-}
-
-void
-PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
-                                    int vnet)
-{
-    MsgPtr msg_ptr;
-    Message *net_msg_ptr = NULL;
-
-    // temporary vectors to store the routing results
-    vector<LinkID> output_links;
-    vector<NetDest> output_link_destinations;
-
-    while (buffer->isReady()) {
-        DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
-
-        // Peek at message
-        msg_ptr = buffer->peekMsgPtr();
-        net_msg_ptr = msg_ptr.get();
-        DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-
-        output_links.clear();
-        output_link_destinations.clear();
-        NetDest msg_dsts = net_msg_ptr->getDestination();
-
-        // Unfortunately, the token-protocol sends some
-        // zero-destination messages, so this assert isn't valid
-        // assert(msg_dsts.count() > 0);
-
-        assert(m_link_order.size() == m_routing_table.size());
-        assert(m_link_order.size() == m_out.size());
-
-        if (m_network_ptr->getAdaptiveRouting()) {
-            if (m_network_ptr->isVNetOrdered(vnet)) {
-                // Don't adaptively route
-                for (int out = 0; out < m_out.size(); out++) {
-                    m_link_order[out].m_link = out;
-                    m_link_order[out].m_value = 0;
-                }
-            } else {
-                // Find how clogged each link is
-                for (int out = 0; out < m_out.size(); out++) {
-                    int out_queue_length = 0;
-                    for (int v = 0; v < m_virtual_networks; v++) {
-                        out_queue_length += m_out[out][v]->getSize();
+            while (buffer->isReady()) {
+                DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+
+                // Peek at message
+                msg_ptr = buffer->peekMsgPtr();
+                net_msg_ptr = msg_ptr.get();
+                DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+
+                output_links.clear();
+                output_link_destinations.clear();
+                NetDest msg_dsts = net_msg_ptr->getDestination();
+
+                // Unfortunately, the token-protocol sends some
+                // zero-destination messages, so this assert isn't valid
+                // assert(msg_dsts.count() > 0);
+
+                assert(m_link_order.size() == m_routing_table.size());
+                assert(m_link_order.size() == m_out.size());
+
+                if (m_network_ptr->getAdaptiveRouting()) {
+                    if (m_network_ptr->isVNetOrdered(vnet)) {
+                        // Don't adaptively route
+                        for (int out = 0; out < m_out.size(); out++) {
+                            m_link_order[out].m_link = out;
+                            m_link_order[out].m_value = 0;
+                        }
+                    } else {
+                        // Find how clogged each link is
+                        for (int out = 0; out < m_out.size(); out++) {
+                            int out_queue_length = 0;
+                            for (int v = 0; v < m_virtual_networks; v++) {
+                                out_queue_length += m_out[out][v]->getSize();
+                            }
+                            int value =
+                                (out_queue_length << 8) |
+                                random_mt.random(0, 0xff);
+                            m_link_order[out].m_link = out;
+                            m_link_order[out].m_value = value;
+                        }
+
+                        // Look at the most empty link first
+                        sort(m_link_order.begin(), m_link_order.end());
                     }
-                    int value =
-                        (out_queue_length << 8) |
-                        random_mt.random(0, 0xff);
-                    m_link_order[out].m_link = out;
-                    m_link_order[out].m_value = value;
                 }
 
-                // Look at the most empty link first
-                sort(m_link_order.begin(), m_link_order.end());
-            }
-        }
+                for (int i = 0; i < m_routing_table.size(); i++) {
+                    // pick the next link to look at
+                    int link = m_link_order[i].m_link;
+                    NetDest dst = m_routing_table[link];
+                    DPRINTF(RubyNetwork, "dst: %s\n", dst);
 
-        for (int i = 0; i < m_routing_table.size(); i++) {
-            // pick the next link to look at
-            int link = m_link_order[i].m_link;
-            NetDest dst = m_routing_table[link];
-            DPRINTF(RubyNetwork, "dst: %s\n", dst);
+                    if (!msg_dsts.intersectionIsNotEmpty(dst))
+                        continue;
 
-            if (!msg_dsts.intersectionIsNotEmpty(dst))
-                continue;
+                    // Remember what link we're using
+                    output_links.push_back(link);
 
-            // Remember what link we're using
-            output_links.push_back(link);
+                    // Need to remember which destinations need this message in
+                    // another vector.  This Set is the intersection of the
+                    // routing_table entry and the current destination set.  The
+                    // intersection must not be empty, since we are inside "if"
+                    output_link_destinations.push_back(msg_dsts.AND(dst));
 
-            // Need to remember which destinations need this message in
-            // another vector.  This Set is the intersection of the
-            // routing_table entry and the current destination set.  The
-            // intersection must not be empty, since we are inside "if"
-            output_link_destinations.push_back(msg_dsts.AND(dst));
-
-            // Next, we update the msg_destination not to include
-            // those nodes that were already handled by this link
-            msg_dsts.removeNetDest(dst);
-        }
+                    // Next, we update the msg_destination not to include
+                    // those nodes that were already handled by this link
+                    msg_dsts.removeNetDest(dst);
+                }
 
-        assert(msg_dsts.count() == 0);
+                assert(msg_dsts.count() == 0);
 
-        // Check for resources - for all outgoing queues
-        bool enough = true;
-        for (int i = 0; i < output_links.size(); i++) {
-            int outgoing = output_links[i];
+                // Check for resources - for all outgoing queues
+                bool enough = true;
+                for (int i = 0; i < output_links.size(); i++) {
+                    int outgoing = output_links[i];
 
-            if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
-                enough = false;
+                    if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+                        enough = false;
 
-            DPRINTF(RubyNetwork, "Checking if node is blocked ..."
-                    "outgoing: %d, vnet: %d, enough: %d\n",
-                    outgoing, vnet, enough);
-        }
+                    DPRINTF(RubyNetwork, "Checking if node is blocked ..."
+                            "outgoing: %d, vnet: %d, enough: %d\n",
+                            outgoing, vnet, enough);
+                }
 
-        // There were not enough resources
-        if (!enough) {
-            scheduleEvent(Cycles(1));
-            DPRINTF(RubyNetwork, "Can't deliver message since a node "
-                    "is blocked\n");
-            DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-            break; // go to next incoming port
-        }
+                // There were not enough resources
+                if (!enough) {
+                    scheduleEvent(Cycles(1));
+                    DPRINTF(RubyNetwork, "Can't deliver message since a node "
+                            "is blocked\n");
+                    DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+                    break; // go to next incoming port
+                }
 
-        MsgPtr unmodified_msg_ptr;
+                MsgPtr unmodified_msg_ptr;
 
-        if (output_links.size() > 1) {
-            // If we are sending this message down more than one link
-            // (size>1), we need to make a copy of the message so each
-            // branch can have a different internal destination we need
-            // to create an unmodified MsgPtr because the MessageBuffer
-            // enqueue func will modify the message
+                if (output_links.size() > 1) {
+                    // If we are sending this message down more than one link
+                    // (size>1), we need to make a copy of the message so each
+                    // branch can have a different internal destination we need
+                    // to create an unmodified MsgPtr because the MessageBuffer
+                    // enqueue func will modify the message
 
-            // This magic line creates a private copy of the message
-            unmodified_msg_ptr = msg_ptr->clone();
-        }
+                    // This magic line creates a private copy of the message
+                    unmodified_msg_ptr = msg_ptr->clone();
+                }
 
-        // Dequeue msg
-        buffer->dequeue();
-        m_pending_message_count[vnet]--;
+                // Dequeue msg
+                buffer->dequeue();
+                m_pending_message_count[vnet]--;
 
-        // Enqueue it - for all outgoing queues
-        for (int i=0; i<output_links.size(); i++) {
-            int outgoing = output_links[i];
+                // Enqueue it - for all outgoing queues
+                for (int i=0; i<output_links.size(); i++) {
+                    int outgoing = output_links[i];
 
-            if (i > 0) {
-                // create a private copy of the unmodified message
-                msg_ptr = unmodified_msg_ptr->clone();
-            }
+                    if (i > 0) {
+                        // create a private copy of the unmodified message
+                        msg_ptr = unmodified_msg_ptr->clone();
+                    }
 
-            // Change the internal destination set of the message so it
-            // knows which destinations this link is responsible for.
-            net_msg_ptr = msg_ptr.get();
-            net_msg_ptr->getDestination() = output_link_destinations[i];
+                    // Change the internal destination set of the message so it
+                    // knows which destinations this link is responsible for.
+                    net_msg_ptr = msg_ptr.get();
+                    net_msg_ptr->getDestination() =
+                        output_link_destinations[i];
 
-            // Enqeue msg
-            DPRINTF(RubyNetwork, "Enqueuing net msg from "
-                    "inport[%d][%d] to outport [%d][%d].\n",
-                    incoming, vnet, outgoing, vnet);
+                    // Enqeue msg
+                    DPRINTF(RubyNetwork, "Enqueuing net msg from "
+                            "inport[%d][%d] to outport [%d][%d].\n",
+                            incoming, vnet, outgoing, vnet);
 
-            m_out[outgoing][vnet]->enqueue(msg_ptr);
+                    m_out[outgoing][vnet]->enqueue(msg_ptr);
+                }
+            }
         }
     }
 }
index 1cc9869642d3220908efbe1b535eae50814eca38..f55281d54238d0f1aa26e12bce19060eb77378bb 100644 (file)
@@ -85,10 +85,8 @@ class PerfectSwitch : public Consumer
     PerfectSwitch& operator=(const PerfectSwitch& obj);
 
     void operateVnet(int vnet);
-    void operateMessageBuffer(MessageBuffer *b, int incoming, int vnet);
 
-    const SwitchID m_switch_id;
-    Switch * const m_switch;
+    SwitchID m_switch_id;
 
     // vector of queues from the components
     std::vector<std::vector<MessageBuffer*> > m_in;
index 09daa79602ab04f54a4d9c78dfa475e74c9840b4..5b7d7ebadeab72e8c3dffc1b9170df799437ce6a 100644 (file)
 #include "mem/ruby/network/simple/Switch.hh"
 #include "mem/ruby/network/simple/Throttle.hh"
 #include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/system/System.hh"
 
 using namespace std;
 using m5::stl_helpers::deletePointers;
 
 SimpleNetwork::SimpleNetwork(const Params *p)
-    : Network(p), m_buffer_size(p->buffer_size),
-      m_endpoint_bandwidth(p->endpoint_bandwidth),
-      m_adaptive_routing(p->adaptive_routing)
+    : Network(p)
 {
+    m_buffer_size = p->buffer_size;
+    m_endpoint_bandwidth = p->endpoint_bandwidth;
+    m_adaptive_routing = p->adaptive_routing;
+
+    // Note: the parent Network Object constructor is called before the
+    // SimpleNetwork child constructor.  Therefore, the member variables
+    // used below should already be initialized.
+    m_endpoint_switches.resize(m_nodes);
+
     // record the routers
     for (vector<BasicRouter*>::const_iterator i = p->routers.begin();
          i != p->routers.end(); ++i) {
@@ -91,6 +99,8 @@ SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
     m_switches[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry,
                                 simple_link->m_latency,
                                 simple_link->m_bw_multiplier);
+
+    m_endpoint_switches[dest] = m_switches[src];
 }
 
 // From an endpoint node to a switch
@@ -223,6 +233,12 @@ SimpleNetwork::functionalRead(Packet *pkt)
         }
     }
 
+    for (unsigned int i = 0; i < m_int_link_buffers.size(); ++i) {
+        if (m_int_link_buffers[i]->functionalRead(pkt)) {
+            return true;
+        }
+    }
+
     return false;
 }
 
index efb342e6e5e535c98c2e0782858e1d4b47a10574..fe0c1838b22d4cd15cd29b9bc530a95001681f6b 100644 (file)
@@ -95,9 +95,11 @@ class SimpleNetwork : public Network
     std::vector<Switch*> m_switches;
     std::vector<MessageBuffer*> m_int_link_buffers;
     int m_num_connected_buffers;
-    const int m_buffer_size;
-    const int m_endpoint_bandwidth;
-    const bool m_adaptive_routing;
+    std::vector<Switch*> m_endpoint_switches;
+
+    int m_buffer_size;
+    int m_endpoint_bandwidth;
+    bool m_adaptive_routing;    
 
     //Statistical variables
     Stats::Formula m_msg_counts[MessageSizeType_NUM];
index e5988e5052f9b393033404017728a2118c4c1fd5..b9d0b80103bfd1b1d95ab606964f1383cfe3f9ae 100644 (file)
@@ -184,6 +184,12 @@ Switch::print(std::ostream& out) const
 bool
 Switch::functionalRead(Packet *pkt)
 {
+    // Access the buffers in the switch for performing a functional read
+    for (unsigned int i = 0; i < m_port_buffers.size(); ++i) {
+        if (m_port_buffers[i]->functionalRead(pkt)) {
+            return true;
+        }
+    }
     return false;
 }
 
index c97531e585210ae58c212279e00cde0632f42eac..785e09aa2aa8a28bd2cbdee17580072c52e2b59d 100644 (file)
@@ -31,7 +31,6 @@
 #include "base/cast.hh"
 #include "base/cprintf.hh"
 #include "debug/RubyNetwork.hh"
-#include "mem/ruby/network/simple/Switch.hh"
 #include "mem/ruby/network/simple/Throttle.hh"
 #include "mem/ruby/network/MessageBuffer.hh"
 #include "mem/ruby/network/Network.hh"
@@ -49,10 +48,27 @@ static int network_message_to_size(Message* net_msg_ptr);
 
 Throttle::Throttle(int sID, RubySystem *rs, NodeID node, Cycles link_latency,
                    int link_bandwidth_multiplier, int endpoint_bandwidth,
-                   Switch *em)
-    : Consumer(em), m_switch_id(sID), m_switch(em), m_node(node),
-      m_ruby_system(rs)
+                   ClockedObject *em)
+    : Consumer(em), m_ruby_system(rs)
 {
+    init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
+    m_sID = sID;
+}
+
+Throttle::Throttle(RubySystem *rs, NodeID node, Cycles link_latency,
+                   int link_bandwidth_multiplier, int endpoint_bandwidth,
+                   ClockedObject *em)
+    : Consumer(em), m_ruby_system(rs)
+{
+    init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
+    m_sID = 0;
+}
+
+void
+Throttle::init(NodeID node, Cycles link_latency,
+               int link_bandwidth_multiplier, int endpoint_bandwidth)
+{
+    m_node = node;
     m_vnets = 0;
 
     assert(link_bandwidth_multiplier > 0);
@@ -82,7 +98,7 @@ Throttle::addLinks(const vector<MessageBuffer*>& in_vec,
 
         // Set consumer and description
         in_ptr->setConsumer(this);
-        string desc = "[Queue to Throttle " + to_string(m_switch_id) + " " +
+        string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
             to_string(m_node) + "]";
     }
 }
index 405593bb129d70a86313176faffa67f885a03bcf..85bf9691ad5c6521ce13bdd101f36839addc18a7 100644 (file)
 #include "mem/ruby/system/System.hh"
 
 class MessageBuffer;
-class Switch;
 
 class Throttle : public Consumer
 {
   public:
     Throttle(int sID, RubySystem *rs, NodeID node, Cycles link_latency,
              int link_bandwidth_multiplier, int endpoint_bandwidth,
-             Switch *em);
+             ClockedObject *em);
+    Throttle(RubySystem *rs, NodeID node, Cycles link_latency,
+             int link_bandwidth_multiplier, int endpoint_bandwidth,
+             ClockedObject *em);
     ~Throttle() {}
 
     std::string name()
-    { return csprintf("Throttle-%i", m_switch_id); }
+    { return csprintf("Throttle-%i", m_sID); }
 
     void addLinks(const std::vector<MessageBuffer*>& in_vec,
                   const std::vector<MessageBuffer*>& out_vec);
@@ -95,10 +97,8 @@ class Throttle : public Consumer
     unsigned int m_vnets;
     std::vector<int> m_units_remaining;
 
-    const int m_switch_id;
-    Switch *m_switch;
+    int m_sID;
     NodeID m_node;
-
     int m_link_bandwidth_multiplier;
     Cycles m_link_latency;
     int m_wakeups_wo_switch;
index 3e9d54499127d438e4a3c9682505b1ba4e6fe788..af42489bc57479f081d41426ea54978ad8c249e5 100644 (file)
@@ -67,12 +67,12 @@ class AccessTraceForAddress
 
   private:
     Addr m_addr;
-    uint64_t m_loads;
-    uint64_t m_stores;
-    uint64_t m_atomics;
-    uint64_t m_total;
-    uint64_t m_user;
-    uint64_t m_sharing;
+    uint64 m_loads;
+    uint64 m_stores;
+    uint64 m_atomics;
+    uint64 m_total;
+    uint64 m_user;
+    uint64 m_sharing;
     Set m_touched_by;
     Histogram* m_histogram_ptr;
 };
index 52c693330aaceca1454f2fbf28dddd0e167a517c..0e7ea7e36b70a9789b12caf9a5ad33e77f42bbd5 100644 (file)
@@ -67,7 +67,7 @@ printSorted(ostream& out, int num_of_sequencers, const AddressMap &record_map,
 {
     const int records_printed = 100;
 
-    uint64_t misses = 0;
+    uint64 misses = 0;
     std::vector<const AccessTraceForAddress *> sorted;
 
     AddressMap::const_iterator i = record_map.begin();
@@ -95,8 +95,8 @@ printSorted(ostream& out, int num_of_sequencers, const AddressMap &record_map,
     Histogram all_records_log(-1);
 
     // Allows us to track how many lines where touched by n processors
-    std::vector<int64_t> m_touched_vec;
-    std::vector<int64_t> m_touched_weighted_vec;
+    std::vector<int64> m_touched_vec;
+    std::vector<int64> m_touched_weighted_vec;
     m_touched_vec.resize(num_of_sequencers+1);
     m_touched_weighted_vec.resize(num_of_sequencers+1);
     for (int j = 0; j < m_touched_vec.size(); j++) {
index ebd44080b046cf9cc9ca2bf704126b56161a2529..39544c0a216cd61bcf316fb60032e87fd796f2f0 100644 (file)
@@ -75,7 +75,7 @@ class AddressProfiler
     AddressProfiler(const AddressProfiler& obj);
     AddressProfiler& operator=(const AddressProfiler& obj);
 
-    int64_t m_sharing_miss_counter;
+    int64 m_sharing_miss_counter;
 
     AddressMap m_dataAccessTrace;
     AddressMap m_macroBlockAccessTrace;
index b3b37e5a6c865187759265653831c758ddaf24b0..7decd497ac23f4e64fd11a2e385a799ce4c74ea9 100644 (file)
@@ -61,10 +61,11 @@ using namespace std;
 using m5::stl_helpers::operator<<;
 
 Profiler::Profiler(const RubySystemParams *p, RubySystem *rs)
-    : m_ruby_system(rs), m_hot_lines(p->hot_lines),
-      m_all_instructions(p->all_instructions),
-      m_num_vnets(p->number_of_virtual_networks)
+    : m_ruby_system(rs)
 {
+    m_hot_lines = p->hot_lines;
+    m_all_instructions = p->all_instructions;
+
     m_address_profiler_ptr = new AddressProfiler(p->num_of_sequencers, this);
     m_address_profiler_ptr->setHotLines(m_hot_lines);
     m_address_profiler_ptr->setAllInstructions(m_all_instructions);
@@ -97,7 +98,8 @@ Profiler::regStats(const std::string &pName)
         .desc("delay histogram for all message")
         .flags(Stats::nozero | Stats::pdf | Stats::oneline);
 
-    for (int i = 0; i < m_num_vnets; i++) {
+    uint32_t numVNets = Network::getNumberOfVirtualNetworks();
+    for (int i = 0; i < numVNets; i++) {
         delayVCHistogram.push_back(new Stats::Histogram());
         delayVCHistogram[i]
             ->init(10)
@@ -249,6 +251,7 @@ Profiler::collateStats()
         m_inst_profiler_ptr->collateStats();
     }
 
+    uint32_t numVNets = Network::getNumberOfVirtualNetworks();
     for (uint32_t i = 0; i < MachineType_NUM; i++) {
         for (map<uint32_t, AbstractController*>::iterator it =
                   m_ruby_system->m_abstract_controls[i].begin();
@@ -257,7 +260,7 @@ Profiler::collateStats()
             AbstractController *ctr = (*it).second;
             delayHistogram.add(ctr->getDelayHist());
 
-            for (uint32_t i = 0; i < m_num_vnets; i++) {
+            for (uint32_t i = 0; i < numVNets; i++) {
                 delayVCHistogram[i]->add(ctr->getDelayVCHist(i));
             }
         }
index 6cfdab1d55baa8afd225c1157d47913f4e9e2d07..146beadd6a08b2c803799634bf6e75e5dccd4351 100644 (file)
@@ -80,8 +80,8 @@ class Profiler
     void addAddressTraceSample(const RubyRequest& msg, NodeID id);
 
     // added by SS
-    bool getHotLines() const { return m_hot_lines; }
-    bool getAllInstructions() const { return m_all_instructions; }
+    bool getHotLines() { return m_hot_lines; }
+    bool getAllInstructions() { return m_all_instructions; }
 
   private:
     // Private copy constructor and assignment operator
@@ -129,9 +129,8 @@ class Profiler
     Stats::Scalar m_IncompleteTimes[MachineType_NUM];
 
     //added by SS
-    const bool m_hot_lines;
-    const bool m_all_instructions;
-    const uint32_t m_num_vnets;
+    bool m_hot_lines;
+    bool m_all_instructions;
 };
 
 #endif // __MEM_RUBY_PROFILER_PROFILER_HH__
index c3c1f8a198afe5871b946a83dc328b2b87344036..40bf2e7b671b2a0ceac69bb5236d67fd9a5ca614 100644 (file)
@@ -33,7 +33,7 @@ using namespace std;
 
 bool StoreTrace::s_init = false; // Total number of store lifetimes of
                                  // all lines
-int64_t StoreTrace::s_total_samples = 0; // Total number of store
+int64 StoreTrace::s_total_samples = 0; // Total number of store
                                        // lifetimes of all lines
 Histogram* StoreTrace::s_store_count_ptr = NULL;
 Histogram* StoreTrace::s_store_first_to_stolen_ptr = NULL;
index a686594f8e1d9b6638a959f5b5b225d5c8686f53..9c1b83cd654e0b48619a35544c82b59f02b654a1 100644 (file)
@@ -53,7 +53,7 @@ class StoreTrace
 
   private:
     static bool s_init;
-    static int64_t s_total_samples; // Total number of store lifetimes
+    static int64 s_total_samples; // Total number of store lifetimes
                                   // of all lines
     static Histogram* s_store_count_ptr;
     static Histogram* s_store_first_to_stolen_ptr;
@@ -66,7 +66,7 @@ class StoreTrace
     Tick m_last_store;
     int m_stores_this_interval;
 
-    int64_t m_total_samples; // Total number of store lifetimes of this line
+    int64 m_total_samples; // Total number of store lifetimes of this line
     Histogram m_store_count;
     Histogram m_store_first_to_stolen;
     Histogram m_store_last_to_stolen;
index 416aea73be12dae96a492f2654a10e24d5ca8205..01fd3f522b1baa57dd4a09fc1442df84801879d9 100644 (file)
@@ -28,9 +28,6 @@
 
 #include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
 
-#include "base/trace.hh"
-#include "debug/RubyCache.hh"
-
 AbstractCacheEntry::AbstractCacheEntry()
 {
     m_Permission = AccessPermission_NotPresent;
@@ -51,25 +48,3 @@ AbstractCacheEntry::changePermission(AccessPermission new_perm)
         m_locked = -1;
     }
 }
-
-void
-AbstractCacheEntry::setLocked(int context)
-{
-    DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", m_Address, context);
-    m_locked = context;
-}
-
-void
-AbstractCacheEntry::clearLocked()
-{
-    DPRINTF(RubyCache, "Clear Lock for addr: %x\n", m_Address);
-    m_locked = -1;
-}
-
-bool
-AbstractCacheEntry::isLocked(int context) const
-{
-    DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
-            m_Address, m_locked, context);
-    return m_locked == context;
-}
index 9265567819ea5769b53a903c1d9d61097a9724b4..6c7a4a00840d7954c902e534b94f2dca9b311b42 100644 (file)
@@ -56,28 +56,10 @@ class AbstractCacheEntry : public AbstractEntry
     virtual DataBlock& getDataBlk()
     { panic("getDataBlk() not implemented!"); }
 
-    // Functions for locking and unlocking the cache entry.  These are required
-    // for supporting atomic memory accesses.
-    void setLocked(int context);
-    void clearLocked();
-    bool isLocked(int context) const;
 
-    void setSetIndex(uint32_t s) { m_set_index = s; }
-    uint32_t getSetIndex() const { return m_set_index; }
-
-    void setWayIndex(uint32_t s) { m_way_index = s; }
-    uint32_t getWayIndex() const { return m_way_index; }
-
-    // Address of this block, required by CacheMemory
-    Addr m_Address;
-    // Holds info whether the address is locked.
-    // Required for implementing LL/SC operations.
-    int m_locked;
-
-  private:
-    // Set and way coordinates of the entry within the cache memory object.
-    uint32_t m_set_index;
-    uint32_t m_way_index;
+    Addr m_Address; // Address of this block, required by CacheMemory
+    int m_locked; // Holds info whether the address is locked,
+                  // required for implementing LL/SC
 };
 
 inline std::ostream&
index 34160c149796ac89da47564d725bd02178118cbf..94361034af01ee29b4202b93f90eacb6813fdb7a 100644 (file)
@@ -139,14 +139,14 @@ class AbstractController : public MemObject, public Consumer
     void wakeUpAllBuffers();
 
   protected:
-    const NodeID m_version;
+    NodeID m_version;
     MachineID m_machineID;
-    const NodeID m_clusterID;
+    NodeID m_clusterID;
 
     // MasterID used by some components of gem5.
-    const MasterID m_masterId;
+    MasterID m_masterId;
 
-    Network *m_net_ptr;
+    Network* m_net_ptr;
     bool m_is_blocking;
     std::map<Addr, MessageBuffer*> m_block_map;
 
@@ -157,9 +157,9 @@ class AbstractController : public MemObject, public Consumer
 
     unsigned int m_in_ports;
     unsigned int m_cur_in_port;
-    const int m_number_of_TBEs;
-    const int m_transitions_per_cycle;
-    const unsigned int m_buffer_size;
+    int m_number_of_TBEs;
+    int m_transitions_per_cycle;
+    unsigned int m_buffer_size;
     Cycles m_recycle_latency;
 
     //! Counter for the number of cycles when the transitions carried out
index d802ecd31f2f44d79455ace412aca1be386e44da..fbcce6e2d422b630dd2a422278a5e96d2fb25e69 100644 (file)
@@ -66,7 +66,7 @@ AbstractReplacementPolicy::~AbstractReplacementPolicy()
 }
 
 Tick
-AbstractReplacementPolicy::getLastAccess(int64_t set, int64_t way)
+AbstractReplacementPolicy::getLastAccess(int64 set, int64 way)
 {
     return m_last_ref_ptr[set][way];
 }
index c118f3c11f5c8720ae82f8e49b00e7680abc4b3c..03ef0d2fd122bb7ffee484224676bce4e2c15fa0 100644 (file)
@@ -44,13 +44,13 @@ class AbstractReplacementPolicy : public SimObject
     virtual ~AbstractReplacementPolicy();
 
     /* touch a block. a.k.a. update timestamp */
-    virtual void touch(int64_t set, int64_t way, Tick time) = 0;
+    virtual void touch(int64 set, int64 way, Tick time) = 0;
 
     /* returns the way to replace */
-    virtual int64_t getVictim(int64_t set) const = 0;
+    virtual int64 getVictim(int64 set) const = 0;
 
     /* get the time of the last access */
-    Tick getLastAccess(int64_t set, int64_t way);
+    Tick getLastAccess(int64 set, int64 way);
 
     virtual bool useOccupancy() const { return false; }
 
index b25962df6a13379f5b26d1187ac197e130001998..8bc3cf584cc4bea0c5af9f59caa85b3108622a90 100644 (file)
@@ -49,7 +49,7 @@ BankedArray::BankedArray(unsigned int banks, Cycles accessLatency,
 }
 
 bool
-BankedArray::tryAccess(int64_t idx)
+BankedArray::tryAccess(int64 idx)
 {
     if (accessLatency == 0)
         return true;
@@ -65,7 +65,7 @@ BankedArray::tryAccess(int64_t idx)
 }
 
 void
-BankedArray::reserve(int64_t idx)
+BankedArray::reserve(int64 idx)
 {
     if (accessLatency == 0)
         return;
@@ -91,7 +91,7 @@ BankedArray::reserve(int64_t idx)
 }
 
 unsigned int
-BankedArray::mapIndexToBank(int64_t idx)
+BankedArray::mapIndexToBank(int64 idx)
 {
     if (banks == 1) {
         return 0;
index 179676f1928b1066b8614bd0a377ec27461af44c..438186944a2dde59af529e1162e13017c051c530 100644 (file)
@@ -51,7 +51,7 @@ class BankedArray
     {
       public:
         AccessRecord() : idx(0), startAccess(0), endAccess(0) {}
-        int64_t idx;
+        int64 idx;
         Tick startAccess;
         Tick endAccess;
     };
@@ -60,7 +60,7 @@ class BankedArray
     // otherwise, schedule the event and wait for it to complete
     std::vector<AccessRecord> busyBanks;
 
-    unsigned int mapIndexToBank(int64_t idx);
+    unsigned int mapIndexToBank(int64 idx);
 
   public:
     BankedArray(unsigned int banks, Cycles accessLatency,
@@ -68,9 +68,9 @@ class BankedArray
 
     // Note: We try the access based on the cache index, not the address
     // This is so we don't get aliasing on blocks being replaced
-    bool tryAccess(int64_t idx);
+    bool tryAccess(int64 idx);
 
-    void reserve(int64_t idx);
+    void reserve(int64 idx);
 
     Cycles getLatency() const { return accessLatency; }
 };
index ac6f823ce6b4c3553c2e93ef83f0423fe120ea83..7eba450c13ea0e0ced767b39409e9011d127bb1a 100644 (file)
@@ -98,7 +98,7 @@ CacheMemory::~CacheMemory()
 }
 
 // convert a Address to its location in the cache
-int64_t
+int64
 CacheMemory::addressToCacheSet(Addr address) const
 {
     assert(address == makeLineAddress(address));
@@ -109,7 +109,7 @@ CacheMemory::addressToCacheSet(Addr address) const
 // Given a cache index: returns the index of the tag in a set.
 // returns -1 if the tag is not found.
 int
-CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
+CacheMemory::findTagInSet(int64 cacheSet, Addr tag) const
 {
     assert(tag == makeLineAddress(tag));
     // search the set for the tags
@@ -124,7 +124,7 @@ CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
 // Given a cache index: returns the index of the tag in a set.
 // returns -1 if the tag is not found.
 int
-CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
+CacheMemory::findTagInSetIgnorePermissions(int64 cacheSet,
                                            Addr tag) const
 {
     assert(tag == makeLineAddress(tag));
@@ -158,12 +158,62 @@ CacheMemory::getAddressAtIdx(int idx) const
     return entry->m_Address;
 }
 
+bool
+CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
+                            DataBlock*& data_ptr)
+{
+    assert(address == makeLineAddress(address));
+    DPRINTF(RubyCache, "address: %s\n", address);
+    int64 cacheSet = addressToCacheSet(address);
+    int loc = findTagInSet(cacheSet, address);
+    if (loc != -1) {
+        // Do we even have a tag match?
+        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
+        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+        data_ptr = &(entry->getDataBlk());
+
+        if (entry->m_Permission == AccessPermission_Read_Write) {
+            return true;
+        }
+        if ((entry->m_Permission == AccessPermission_Read_Only) &&
+            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
+            return true;
+        }
+        // The line must not be accessible
+    }
+    data_ptr = NULL;
+    return false;
+}
+
+bool
+CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
+                             DataBlock*& data_ptr)
+{
+    assert(address == makeLineAddress(address));
+    DPRINTF(RubyCache, "address: %s\n", address);
+    int64 cacheSet = addressToCacheSet(address);
+    int loc = findTagInSet(cacheSet, address);
+
+    if (loc != -1) {
+        // Do we even have a tag match?
+        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
+        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+        data_ptr = &(entry->getDataBlk());
+
+        return m_cache[cacheSet][loc]->m_Permission !=
+            AccessPermission_NotPresent;
+    }
+
+    data_ptr = NULL;
+    return false;
+}
+
 // tests to see if an address is present in the cache
 bool
 CacheMemory::isTagPresent(Addr address) const
 {
     assert(address == makeLineAddress(address));
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
 
     if (loc == -1) {
@@ -183,7 +233,7 @@ CacheMemory::cacheAvail(Addr address) const
 {
     assert(address == makeLineAddress(address));
 
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
 
     for (int i = 0; i < m_cache_assoc; i++) {
         AbstractCacheEntry* entry = m_cache[cacheSet][i];
@@ -201,7 +251,7 @@ CacheMemory::cacheAvail(Addr address) const
 }
 
 AbstractCacheEntry*
-CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
+CacheMemory::allocate(Addr address, AbstractCacheEntry* entry, bool touch)
 {
     assert(address == makeLineAddress(address));
     assert(!isTagPresent(address));
@@ -209,7 +259,7 @@ CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
     DPRINTF(RubyCache, "address: %s\n", address);
 
     // Find the first open slot
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
     for (int i = 0; i < m_cache_assoc; i++) {
         if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
@@ -220,8 +270,6 @@ CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
                     address);
             set[i]->m_locked = -1;
             m_tag_index[address] = i;
-            entry->setSetIndex(cacheSet);
-            entry->setWayIndex(i);
 
             if (touch) {
                 m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
@@ -239,7 +287,7 @@ CacheMemory::deallocate(Addr address)
     assert(address == makeLineAddress(address));
     assert(isTagPresent(address));
     DPRINTF(RubyCache, "address: %s\n", address);
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if (loc != -1) {
         delete m_cache[cacheSet][loc];
@@ -255,7 +303,7 @@ CacheMemory::cacheProbe(Addr address) const
     assert(address == makeLineAddress(address));
     assert(!cacheAvail(address));
 
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
         m_Address;
 }
@@ -265,7 +313,7 @@ AbstractCacheEntry*
 CacheMemory::lookup(Addr address)
 {
     assert(address == makeLineAddress(address));
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if(loc == -1) return NULL;
     return m_cache[cacheSet][loc];
@@ -276,7 +324,7 @@ const AbstractCacheEntry*
 CacheMemory::lookup(Addr address) const
 {
     assert(address == makeLineAddress(address));
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if(loc == -1) return NULL;
     return m_cache[cacheSet][loc];
@@ -286,27 +334,19 @@ CacheMemory::lookup(Addr address) const
 void
 CacheMemory::setMRU(Addr address)
 {
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
 
     if(loc != -1)
         m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
 }
 
-void
-CacheMemory::setMRU(const AbstractCacheEntry *e)
-{
-    uint32_t cacheSet = e->getSetIndex();
-    uint32_t loc = e->getWayIndex();
-    m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
-}
-
 void
 CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
 {
-    uint64_t warmedUpBlocks = 0;
-    uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
-                                       (uint64_t)m_cache_assoc;
+    uint64 warmedUpBlocks = 0;
+    uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets
+                                                  * (uint64)m_cache_assoc;
 
     for (int i = 0; i < m_cache_num_sets; i++) {
         for (int j = 0; j < m_cache_assoc; j++) {
@@ -336,7 +376,8 @@ CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
 
     DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
             "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
-            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
+            (uint64)m_cache_num_sets * (uint64)m_cache_assoc,
+            (float(warmedUpBlocks)/float(totalBlocks))*100.0);
 }
 
 void
@@ -369,10 +410,10 @@ CacheMemory::setLocked(Addr address, int context)
 {
     DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
     assert(address == makeLineAddress(address));
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     assert(loc != -1);
-    m_cache[cacheSet][loc]->setLocked(context);
+    m_cache[cacheSet][loc]->m_locked = context;
 }
 
 void
@@ -380,22 +421,22 @@ CacheMemory::clearLocked(Addr address)
 {
     DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
     assert(address == makeLineAddress(address));
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     assert(loc != -1);
-    m_cache[cacheSet][loc]->clearLocked();
+    m_cache[cacheSet][loc]->m_locked = -1;
 }
 
 bool
 CacheMemory::isLocked(Addr address, int context)
 {
     assert(address == makeLineAddress(address));
-    int64_t cacheSet = addressToCacheSet(address);
+    int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     assert(loc != -1);
     DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
             address, m_cache[cacheSet][loc]->m_locked, context);
-    return m_cache[cacheSet][loc]->isLocked(context);
+    return m_cache[cacheSet][loc]->m_locked == context;
 }
 
 void
@@ -553,13 +594,13 @@ CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
 }
 
 bool
-CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
+CacheMemory::isBlockInvalid(int64 cache_set, int64 loc)
 {
   return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
 }
 
 bool
-CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
+CacheMemory::isBlockNotBusy(int64 cache_set, int64 loc)
 {
   return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
 }
index 94174b28676440950b67bfa1a0c5bb86cc3534f8..08551ab87c9f4699a9978b8e85763d033118a91e 100644 (file)
@@ -56,6 +56,15 @@ class CacheMemory : public SimObject
 
     void init();
 
+    // Public Methods
+    // perform a cache access and see if we hit or not.  Return true on a hit.
+    bool tryCacheAccess(Addr address, RubyRequestType type,
+                        DataBlock*& data_ptr);
+
+    // similar to above, but doesn't require full access check
+    bool testCacheAccess(Addr address, RubyRequestType type,
+                         DataBlock*& data_ptr);
+
     // tests to see if an address is present in the cache
     bool isTagPresent(Addr address) const;
 
@@ -89,22 +98,15 @@ class CacheMemory : public SimObject
     Cycles getTagLatency() const { return tagArray.getLatency(); }
     Cycles getDataLatency() const { return dataArray.getLatency(); }
 
-    bool isBlockInvalid(int64_t cache_set, int64_t loc);
-    bool isBlockNotBusy(int64_t cache_set, int64_t loc);
+    bool isBlockInvalid(int64 cache_set, int64 loc);
+    bool isBlockNotBusy(int64 cache_set, int64 loc);
 
     // Hook for checkpointing the contents of the cache
     void recordCacheContents(int cntrl, CacheRecorder* tr) const;
 
     // Set this address to most recently used
     void setMRU(Addr address);
-    // Set this entry to most recently used
-    void setMRU(const AbstractCacheEntry *e);
-
-    // Functions for locking and unlocking cache lines corresponding to the
-    // provided address.  These are required for supporting atomic memory
-    // accesses.  These are to be used when only the address of the cache entry
-    // is available.  In case the entry itself is available. use the functions
-    // provided by the AbstractCacheEntry class.
+
     void setLocked (Addr addr, int context);
     void clearLocked (Addr addr);
     bool isLocked (Addr addr, int context);
@@ -142,12 +144,12 @@ class CacheMemory : public SimObject
 
   private:
     // convert a Address to its location in the cache
-    int64_t addressToCacheSet(Addr address) const;
+    int64 addressToCacheSet(Addr address) const;
 
     // Given a cache tag: returns the index of the tag in a set.
     // returns -1 if the tag is not found.
-    int findTagInSet(int64_t line, Addr tag) const;
-    int findTagInSetIgnorePermissions(int64_t cacheSet, Addr tag) const;
+    int findTagInSet(int64 line, Addr tag) const;
+    int findTagInSetIgnorePermissions(int64 cacheSet, Addr tag) const;
 
     // Private copy constructor and assignment operator
     CacheMemory(const CacheMemory& obj);
index 82388a8954f6f27c28a46b2c011c7c686c08a94b..b840349e196c9c4f4f616c1310d3d888bf14da9d 100644 (file)
@@ -37,6 +37,7 @@ using namespace std;
 
 int DirectoryMemory::m_num_directories = 0;
 int DirectoryMemory::m_num_directories_bits = 0;
+uint64_t DirectoryMemory::m_total_size_bytes = 0;
 int DirectoryMemory::m_numa_high_bit = 0;
 
 DirectoryMemory::DirectoryMemory(const Params *p)
@@ -59,6 +60,7 @@ DirectoryMemory::init()
 
     m_num_directories++;
     m_num_directories_bits = ceilLog2(m_num_directories);
+    m_total_size_bytes += m_size_bytes;
 
     if (m_numa_high_bit == 0) {
         m_numa_high_bit = RubySystem::getMemorySizeBits() - 1;
index 98403808b52de8b8378df742e65f30707bb20848..a549366d09e1dddae2d42759af8dac0d16da539a 100644 (file)
@@ -76,6 +76,7 @@ class DirectoryMemory : public SimObject
 
     static int m_num_directories;
     static int m_num_directories_bits;
+    static uint64_t m_total_size_bytes;
     static int m_numa_high_bit;
 };
 
index 286d197723e92fcbea92f5f3911b8eb9bbc95ce8..a1e3b277e1e5f7b4939caa7a08dac1e0af03ed83 100644 (file)
@@ -50,7 +50,7 @@ LRUReplacementPolicyParams::create()
 
 
 void
-LRUPolicy::touch(int64_t set, int64_t index, Tick time)
+LRUPolicy::touch(int64 set, int64 index, Tick time)
 {
     assert(index >= 0 && index < m_assoc);
     assert(set >= 0 && set < m_num_sets);
@@ -58,11 +58,11 @@ LRUPolicy::touch(int64_t set, int64_t index, Tick time)
     m_last_ref_ptr[set][index] = time;
 }
 
-int64_t
-LRUPolicy::getVictim(int64_t set) const
+int64
+LRUPolicy::getVictim(int64 set) const
 {
     Tick time, smallest_time;
-    int64_t smallest_index;
+    int64 smallest_index;
 
     smallest_index = 0;
     smallest_time = m_last_ref_ptr[set][0];
index 388718319996894d9766f1eb89e273da79c976c0..9a9c9e3ebdfdd9909c2a8723a4e96fdccf006d75 100644 (file)
@@ -41,8 +41,8 @@ class LRUPolicy : public AbstractReplacementPolicy
     LRUPolicy(const Params * p);
     ~LRUPolicy();
 
-    void touch(int64_t set, int64_t way, Tick time);
-    int64_t getVictim(int64_t set) const;
+    void touch(int64 set, int64 way, Tick time);
+    int64 getVictim(int64 set) const;
 };
 
 #endif // __MEM_RUBY_STRUCTURES_LRUPOLICY_HH__
index a2b21a6259e378d4d145af29f9e6c814638d65ed..8eee0821b4e21a8e34685915c5b1b44a0df3179a 100644 (file)
@@ -38,7 +38,7 @@ PseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
     // associativity cannot exceed capacity of tree representation
     assert(m_num_sets > 0 &&
            m_assoc > 1 &&
-           m_assoc <= (int64_t) sizeof(uint64_t)*4);
+           m_assoc <= (int64) sizeof(uint64)*4);
 
     m_trees = NULL;
     m_num_levels = 0;
@@ -55,7 +55,7 @@ PseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
         m_num_levels++;
     }
     assert(m_num_levels < sizeof(unsigned int)*4);
-    m_trees = new uint64_t[m_num_sets];
+    m_trees = new uint64[m_num_sets];
     for (unsigned i = 0; i < m_num_sets; i++) {
         m_trees[i] = 0;
     }
@@ -75,7 +75,7 @@ PseudoLRUPolicy::~PseudoLRUPolicy()
 }
 
 void
-PseudoLRUPolicy::touch(int64_t set, int64_t index, Tick time)
+PseudoLRUPolicy::touch(int64 set, int64 index, Tick time)
 {
     assert(index >= 0 && index < m_assoc);
     assert(set >= 0 && set < m_num_sets);
@@ -93,10 +93,10 @@ PseudoLRUPolicy::touch(int64_t set, int64_t index, Tick time)
     m_last_ref_ptr[set][index] = time;
 }
 
-int64_t
-PseudoLRUPolicy::getVictim(int64_t set) const
+int64
+PseudoLRUPolicy::getVictim(int64 set) const
 {
-    int64_t index = 0;
+    int64 index = 0;
 
     int tree_index = 0;
     int node_val;
index a4a388cf5225f03c707d1358bdc16369a7186ad0..fc5add8b129a7da821cb50f12752f313a4d6f411 100644 (file)
@@ -53,13 +53,13 @@ class PseudoLRUPolicy : public AbstractReplacementPolicy
     PseudoLRUPolicy(const Params * p);
     ~PseudoLRUPolicy();
 
-    void touch(int64_t set, int64_t way, Tick time);
-    int64_t getVictim(int64_t set) const;
+    void touch(int64 set, int64 way, Tick time);
+    int64 getVictim(int64 set) const;
 
   private:
     unsigned int m_effective_assoc;    /** nearest (to ceiling) power of 2 */
     unsigned int m_num_levels;         /** number of levels in the tree */
-    uint64_t *m_trees;                   /** bit representation of the
+    uint64* m_trees;                   /** bit representation of the
                                         * trees, one for each set */
 };
 
index 413850627d8023b764065e4a8264fe6c3b11a957..0521aac06a54470b54aa41f3a2dfbb865f2a6ef6 100644 (file)
@@ -176,7 +176,7 @@ void
 RubyMemoryControl::init()
 {
     m_msg_counter = 0;
-    assert(m_tFaw <= 62); // must fit in a uint64_t shift register
+    assert(m_tFaw <= 62); // must fit in a uint64 shift register
 
     m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
     m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
@@ -213,7 +213,7 @@ RubyMemoryControl::init()
     // m_tfaw_count keeps track of how many 1 bits are set
     // in each shift register.  When m_tfaw_count is >= 4,
     // new activates are not allowed.
-    m_tfaw_shift = new uint64_t[m_total_ranks];
+    m_tfaw_shift = new uint64[m_total_ranks];
     m_tfaw_count = new int[m_total_ranks];
     for (int i = 0; i < m_total_ranks; i++) {
         m_tfaw_shift[i] = 0;
@@ -236,7 +236,7 @@ RubyMemoryControl::reset()
 {
     m_msg_counter = 0;
 
-    assert(m_tFaw <= 62); // must fit in a uint64_t shift register
+    assert(m_tFaw <= 62); // must fit in a uint64 shift register
 
     m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
     m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
index 376ce4d75b42b9c6821c62ff01d9cc68a2ebdc14..c68a2da6c9bc047cf39b75aa6f91ab784bfec4c8 100644 (file)
@@ -162,11 +162,11 @@ class RubyMemoryControl : public AbstractMemory, public Consumer
 
     // Each entry indicates number of address-bus cycles until bank
     // is reschedulable:
-    int *m_bankBusyCounter;
-    int *m_oldRequest;
+    int* m_bankBusyCounter;
+    int* m_oldRequest;
 
-    uint64_t *m_tfaw_shift;
-    int *m_tfaw_count;
+    uint64* m_tfaw_shift;
+    int* m_tfaw_count;
 
     // Each of these indicates number of address-bus cycles until
     // we can issue a new request of the corresponding type:
@@ -182,12 +182,12 @@ class RubyMemoryControl : public AbstractMemory, public Consumer
     int m_ageCounter;         // age of old requests; to detect starvation
     int m_idleCount;          // watchdog timer for shutting down
 
-    MemCntrlProfiler *m_profiler_ptr;
+    MemCntrlProfiler* m_profiler_ptr;
 
     class MemCntrlEvent : public Event
     {
       public:
-        MemCntrlEvent(RubyMemoryControl *_mem_cntrl)
+        MemCntrlEvent(RubyMemoryControl* _mem_cntrl)
         {
             mem_cntrl = _mem_cntrl;
         }
index 9568d6a88ec384e832bd42062bd59e3864b04b9a..a2ac6bdf835afb2467c3b3b640ff17454ab297f3 100644 (file)
@@ -58,6 +58,15 @@ CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace,
       m_seq_map(seq_map),  m_bytes_read(0), m_records_read(0),
       m_records_flushed(0), m_block_size_bytes(block_size_bytes)
 {
+    if (m_uncompressed_trace != NULL) {
+        if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) {
+            // Block sizes larger than when the trace was recorded are not
+            // supported, as we cannot reliably turn accesses to smaller blocks
+            // into larger ones.
+            panic("Recorded cache block size (%d) < current block size (%d) !!",
+                    m_block_size_bytes, RubySystem::getBlockSizeBytes());
+        }
+    }
 }
 
 CacheRecorder::~CacheRecorder()
@@ -152,13 +161,13 @@ CacheRecorder::addRecord(int cntrl, Addr data_addr, Addr pc_addr,
     m_records.push_back(rec);
 }
 
-uint64_t
-CacheRecorder::aggregateRecords(uint8_t **buf, uint64_t total_size)
+uint64
+CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size)
 {
     std::sort(m_records.begin(), m_records.end(), compareTraceRecords);
 
     int size = m_records.size();
-    uint64_t current_size = 0;
+    uint64 current_size = 0;
     int record_size = sizeof(TraceRecord) + m_block_size_bytes;
 
     for (int i = 0; i < size; ++i) {
index 44110cf9f30e7b21c1e4d18a849a4752dab0225f..a4a7261f40a70b3d52d9eda3aa900b01117f2484 100644 (file)
@@ -77,7 +77,7 @@ class CacheRecorder
     void addRecord(int cntrl, Addr data_addr, Addr pc_addr,
                    RubyRequestType type, Tick time, DataBlock& data);
 
-    uint64_t aggregateRecords(uint8_t **data, uint64_t size);
+    uint64 aggregateRecords(uint8_t** data, uint64 size);
 
     /*!
      * Function for flushing the memory contents of the caches to the
index 9ffaa5702b64a8a0d528e737bb59a34f423e485d..81a9a181bce229fc3457185d33e61cc31ce7af11 100644 (file)
@@ -34,6 +34,7 @@ from SimpleMemory import *
 class RubySystem(ClockedObject):
     type = 'RubySystem'
     cxx_header = "mem/ruby/system/System.hh"
+    random_seed = Param.Int(1234, "random seed used by the simulation");
     randomization = Param.Bool(False,
         "insert random delays on message enqueue times");
     block_size_bytes = Param.UInt32(64,
@@ -41,13 +42,11 @@ class RubySystem(ClockedObject):
     memory_size_bits = Param.UInt32(64,
         "number of bits that a memory address requires");
 
-    phys_mem = Param.SimpleMemory(NULL, "")
-
-    access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
-        store and only use ruby for timing.")
-
     # Profiler related configuration variables
     hot_lines = Param.Bool(False, "")
     all_instructions = Param.Bool(False, "")
     num_of_sequencers = Param.Int("")
-    number_of_virtual_networks = Param.Unsigned("")
+    phys_mem = Param.SimpleMemory(NULL, "")
+
+    access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
+        store and only use ruby for timing.")
index 740db7d8ded9ad84d1ac7638ab7d28bde9729d65..30575879823646e67174283e8ce857411bdf1e3c 100644 (file)
@@ -317,27 +317,28 @@ Sequencer::removeRequest(SequencerRequest* srequest)
 void
 Sequencer::invalidateSC(Addr address)
 {
-    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
-    // The controller has lost the coherence permissions, hence the lock
-    // on the cache line maintained by the cache should be cleared.
-    if (e && e->isLocked(m_version)) {
-        e->clearLocked();
+    RequestTable::iterator i = m_writeRequestTable.find(address);
+    if (i != m_writeRequestTable.end()) {
+        SequencerRequest* request = i->second;
+        // The controller has lost the coherence permissions, hence the lock
+        // on the cache line maintained by the cache should be cleared.
+        if (request->m_type == RubyRequestType_Store_Conditional) {
+            m_dataCache_ptr->clearLocked(address);
+        }
     }
 }
 
 bool
 Sequencer::handleLlsc(Addr address, SequencerRequest* request)
 {
-    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
-    if (!e)
-        return true;
-
+    //
     // The success flag indicates whether the LLSC operation was successful.
     // LL ops will always succeed, but SC may fail if the cache line is no
     // longer locked.
+    //
     bool success = true;
     if (request->m_type == RubyRequestType_Store_Conditional) {
-        if (!e->isLocked(m_version)) {
+        if (!m_dataCache_ptr->isLocked(address, m_version)) {
             //
             // For failed SC requests, indicate the failure to the cpu by
             // setting the extra data to zero.
@@ -354,18 +355,19 @@ Sequencer::handleLlsc(Addr address, SequencerRequest* request)
         //
         // Independent of success, all SC operations must clear the lock
         //
-        e->clearLocked();
+        m_dataCache_ptr->clearLocked(address);
     } else if (request->m_type == RubyRequestType_Load_Linked) {
         //
         // Note: To fully follow Alpha LLSC semantics, should the LL clear any
         // previously locked cache lines?
         //
-        e->setLocked(m_version);
-    } else if (e->isLocked(m_version)) {
+        m_dataCache_ptr->setLocked(address, m_version);
+    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
+               (m_dataCache_ptr->isLocked(address, m_version))) {
         //
         // Normal writes should clear the locked address
         //
-        e->clearLocked();
+        m_dataCache_ptr->clearLocked(address);
     }
     return success;
 }
@@ -496,15 +498,19 @@ Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                        const Cycles forwardRequestTime,
                        const Cycles firstResponseTime)
 {
-    warn_once("Replacement policy updates recently became the responsibility "
-              "of SLICC state machines. Make sure to setMRU() near callbacks "
-              "in .sm files!");
-
     PacketPtr pkt = srequest->pkt;
     Addr request_address(pkt->getAddr());
+    Addr request_line_address = makeLineAddress(pkt->getAddr());
     RubyRequestType type = srequest->m_type;
     Cycles issued_time = srequest->issue_time;
 
+    // Set this cache entry to the most recently used
+    if (type == RubyRequestType_IFETCH) {
+        m_instCache_ptr->setMRU(request_line_address);
+    } else {
+        m_dataCache_ptr->setMRU(request_line_address);
+    }
+
     assert(curCycle() >= issued_time);
     Cycles total_latency = curCycle() - issued_time;
 
index cb485a47bd3311c99b2226a0c57b3f481f7a727e..c0008201030e53d61fb48f86556979ebd8540396 100644 (file)
@@ -45,6 +45,7 @@
 
 using namespace std;
 
+int RubySystem::m_random_seed;
 bool RubySystem::m_randomization;
 uint32_t RubySystem::m_block_size_bytes;
 uint32_t RubySystem::m_block_size_bits;
@@ -59,6 +60,8 @@ RubySystem::RubySystem(const Params *p)
     : ClockedObject(p), m_access_backing_store(p->access_backing_store),
       m_cache_recorder(NULL)
 {
+    m_random_seed = p->random_seed;
+    srandom(m_random_seed);
     m_randomization = p->randomization;
 
     m_block_size_bytes = p->block_size_bytes;
@@ -99,8 +102,8 @@ RubySystem::~RubySystem()
 
 void
 RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
-                              uint64_t cache_trace_size,
-                              uint64_t block_size_bytes)
+                              uint64 cache_trace_size,
+                              uint64 block_size_bytes)
 {
     vector<Sequencer*> sequencer_map;
     Sequencer* sequencer_ptr = NULL;
@@ -204,7 +207,7 @@ RubySystem::memWriteback()
 
 void
 RubySystem::writeCompressedTrace(uint8_t *raw_data, string filename,
-                                 uint64_t uncompressed_trace_size)
+                                 uint64 uncompressed_trace_size)
 {
     // Create the checkpoint file for the memory
     string thefile = CheckpointIn::dir() + "/" + filename.c_str();
@@ -237,7 +240,7 @@ RubySystem::serializeOld(CheckpointOut &cp)
     // Store the cache-block size, so we are able to restore on systems with a
     // different cache-block size. CacheRecorder depends on the correct
     // cache-block size upon unserializing.
-    uint64_t block_size_bytes = getBlockSizeBytes();
+    uint64 block_size_bytes = getBlockSizeBytes();
     SERIALIZE_SCALAR(block_size_bytes);
 
     // Check that there's a valid trace to use.  If not, then memory won't be
@@ -249,7 +252,7 @@ RubySystem::serializeOld(CheckpointOut &cp)
 
     // Aggregate the trace entries together into a single array
     uint8_t *raw_data = new uint8_t[4096];
-    uint64_t cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
+    uint64 cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
                                                                  4096);
     string cache_trace_file = name() + ".cache.gz";
     writeCompressedTrace(raw_data, cache_trace_file, cache_trace_size);
@@ -264,7 +267,7 @@ RubySystem::serializeOld(CheckpointOut &cp)
 
 void
 RubySystem::readCompressedTrace(string filename, uint8_t *&raw_data,
-                                uint64_t &uncompressed_trace_size)
+                                uint64& uncompressed_trace_size)
 {
     // Read the trace file
     gzFile compressedTrace;
@@ -301,19 +304,11 @@ RubySystem::unserialize(CheckpointIn &cp)
     // This value should be set to the checkpoint-system's block-size.
     // Optional, as checkpoints without it can be run if the
     // checkpoint-system's block-size == current block-size.
-    uint64_t block_size_bytes = m_block_size_bytes;
+    uint64 block_size_bytes = getBlockSizeBytes();
     UNSERIALIZE_OPT_SCALAR(block_size_bytes);
 
-    if (block_size_bytes < m_block_size_bytes) {
-        // Block sizes larger than when the trace was recorded are not
-        // supported, as we cannot reliably turn accesses to smaller blocks
-        // into larger ones.
-        panic("Recorded cache block size (%d) < current block size (%d) !!",
-              block_size_bytes, m_block_size_bytes);
-    }
-
     string cache_trace_file;
-    uint64_t cache_trace_size = 0;
+    uint64 cache_trace_size = 0;
 
     UNSERIALIZE_SCALAR(cache_trace_file);
     UNSERIALIZE_SCALAR(cache_trace_size);
index 70d216201c123ec63b083e4a367f899bd003be34..787e4f4ae5eef8ca3a76fade2e0b09db626c0aa1 100644 (file)
@@ -70,6 +70,7 @@ class RubySystem : public ClockedObject
     ~RubySystem();
 
     // config accessors
+    static int getRandomSeed() { return m_random_seed; }
     static int getRandomization() { return m_randomization; }
     static uint32_t getBlockSizeBytes() { return m_block_size_bytes; }
     static uint32_t getBlockSizeBits() { return m_block_size_bits; }
@@ -117,17 +118,18 @@ class RubySystem : public ClockedObject
     RubySystem& operator=(const RubySystem& obj);
 
     void makeCacheRecorder(uint8_t *uncompressed_trace,
-                           uint64_t cache_trace_size,
-                           uint64_t block_size_bytes);
+                           uint64 cache_trace_size,
+                           uint64 block_size_bytes);
 
     void readCompressedTrace(std::string filename,
                              uint8_t *&raw_data,
-                             uint64_t &uncompressed_trace_size);
+                             uint64& uncompressed_trace_size);
     void writeCompressedTrace(uint8_t *raw_data, std::string file,
-                              uint64_t uncompressed_trace_size);
+                              uint64 uncompressed_trace_size);
 
   private:
     // configuration parameters
+    static int m_random_seed;
     static bool m_randomization;
     static uint32_t m_block_size_bytes;
     static uint32_t m_block_size_bits;
index bc0c1c224fce56e9cf0e4d15ed41251f8e8b7966..d97c134831d6f1515f666bbb3e23e4e7f6ab3690 100644 (file)
@@ -67,6 +67,6 @@ class EnumDeclAST(DeclAST):
         pairs = { "external" : "yes" }
         func = Func(self.symtab, func_id + "_" + t.c_ident,
                     func_id, self.location,
-                    self.symtab.find("std::string", Type), [ t ], [], [], "",
+                    self.symtab.find("std::string", Type), [ t ], [], "",
                     pairs)
         self.symtab.newSymbol(func)
index ef39b40f0e893c84724c223ad33c1c517a7c4fbe..ce73304f120cc51f48d8f8f2d4d6dd965f959ee2 100644 (file)
@@ -46,9 +46,6 @@ class FormalParamAST(AST):
     def generate(self):
         type = self.type_ast.type
         param = "param_%s" % self.ident
-        proto = ""
-        body = ""
-        default = False
 
         # Add to symbol table
         v = Var(self.symtab, self.ident, self.location, type, param,
@@ -59,21 +56,6 @@ class FormalParamAST(AST):
            "interface" in type and (
                type["interface"] == "AbstractCacheEntry" or
                type["interface"] == "AbstractEntry")):
-            proto = "%s* %s" % (type.c_ident, param)
-            body = proto
-        elif self.default != None:
-            value = ""
-            if self.default == True:
-                value = "true"
-            elif self.default == False:
-                value = "false"
-            else:
-                value = "%s" % self.default
-            proto = "const %s& %s = %s" % (type.c_ident, param, value)
-            body = "const %s& %s" % (type.c_ident, param)
-            default = True
+            return type, "%s* %s" % (type.c_ident, param)
         else:
-            proto = "const %s& %s" % (type.c_ident, param)
-            body = proto
-
-        return type, proto, body, default
+            return type, "const %s& %s" % (type.c_ident, param)
index 0c9880d6dd1bfb4407d5493458492b135e318c42..9336a22977ee232178652ec4e0cbe46afd50a2c2 100644 (file)
@@ -93,7 +93,22 @@ class FuncCallExprAST(ExprAST):
         if func is None:
             self.error("Unrecognized function name: '%s'", func_name_args)
 
-        cvec, type_vec = func.checkArguments(self.exprs)
+        if len(self.exprs) != len(func.param_types):
+            self.error("Wrong number of arguments passed to function : '%s'" +\
+                       " Expected %d, got %d", self.proc_name,
+                       len(func.param_types), len(self.exprs))
+
+        cvec = []
+        type_vec = []
+        for expr,expected_type in zip(self.exprs, func.param_types):
+            # Check the types of the parameter
+            actual_type,param_code = expr.inline(True)
+            if str(actual_type) != 'OOD' and \
+            str(actual_type) != str(expected_type):
+                expr.error("Type mismatch: expected: %s actual: %s" % \
+                           (expected_type, actual_type))
+            cvec.append(param_code)
+            type_vec.append(expected_type)
 
         # OK, the semantics of "trigger" here is that, ports in the
         # machine have different priorities. We always check the first
index 4e64c0ba5e6417072867cf40cb98ba6a7bdc8c04..47ae7076ef3ce74f8b6463854d152f7a1dcb6bad 100644 (file)
@@ -45,9 +45,7 @@ class FuncDeclAST(DeclAST):
 
     def generate(self, parent = None):
         types = []
-        proto_params = []
-        body_params = []
-        default_count = 0
+        params = []
         void_type = self.symtab.find("void", Type)
 
         # Generate definition code
@@ -60,17 +58,13 @@ class FuncDeclAST(DeclAST):
         for formal in self.formals:
             # Lookup parameter types
             try:
-                type, proto, body, default = formal.generate()
+                type, ident = formal.generate()
                 types.append(type)
-                proto_params.append(proto)
-                body_params.append(body)
-                if default:
-                    default_count += 1
+                params.append(ident)
 
             except AttributeError:
                 types.append(formal.type)
-                proto_params.append(None)
-                body_params.append(None)
+                params.append(None)
 
         body = self.slicc.codeFormatter()
         if self.statements is None:
@@ -93,8 +87,7 @@ class FuncDeclAST(DeclAST):
 
         machine = self.state_machine
         func = Func(self.symtab, func_name_args, self.ident, self.location,
-                    return_type, types, proto_params,
-                    body_params, str(body), self.pairs, default_count)
+                    return_type, types, params, str(body), self.pairs)
 
         if parent is not None:
             if not parent.addFunc(func):
index 2ef0431519ed98f8764380bef3b4661a54b4b14d..7a019a0e07792a3a4e4382fb05bfa0a228f1ac58 100644 (file)
@@ -89,13 +89,13 @@ class InPortDeclAST(DeclAST):
         for param in param_types:
             trigger_func_name += "_" + param.ident
         func = Func(self.symtab, trigger_func_name, "trigger", self.location,
-                    void_type, param_types, [], [], "", pairs)
+                    void_type, param_types, [], "", pairs)
         symtab.newSymbol(func)
 
         # Add the stallPort method - this hacks reschedules the controller
         # for stalled messages that don't trigger events
         func = Func(self.symtab, "stallPort", "stallPort", self.location,
-                    void_type, [], [], [], "", pairs)
+                    void_type, [], [], "", pairs)
         symtab.newSymbol(func)
 
         param_types = []
index 104d6f8df4a6b8b77e48722020d0528bcfa6b5ad..8be319a40d0a0a26eaf9fc998c565008d1b8a730 100644 (file)
@@ -56,8 +56,20 @@ class MethodCallExprAST(ExprAST):
             self.error("Invalid method call: Type '%s' does not have a method '%s'",
                        obj_type, methodId)
 
-        func = obj_type.methods[methodId]
-        func.checkArguments(self.expr_ast_vec)
+        if len(self.expr_ast_vec) != \
+               len(obj_type.methods[methodId].param_types):
+            # Right number of parameters
+            self.error("Wrong number of parameters for function name: '%s', " + \
+                       "expected: , actual: ", proc_name,
+                  len(obj_type.methods[methodId].param_types),
+                  len(self.expr_ast_vec))
+
+        for actual_type, expected_type in \
+                zip(paramTypes, obj_type.methods[methodId].param_types):
+            if actual_type != expected_type and \
+                   str(actual_type["interface"]) != str(expected_type):
+                self.error("Type mismatch: expected: %s actual: %s",
+                           expected_type, actual_type)
 
         # Return the return type of the method
         return obj_type.methods[methodId].return_type
@@ -66,9 +78,10 @@ class MethodCallExprAST(ExprAST):
         pass
 
 class MemberMethodCallExprAST(MethodCallExprAST):
-    def __init__(self, slicc, obj_expr_ast, func_call):
+    def __init__(self, slicc, obj_expr_ast, proc_name, expr_ast_vec):
         s = super(MemberMethodCallExprAST, self)
-        s.__init__(slicc, func_call.proc_name, func_call.exprs)
+        s.__init__(slicc, proc_name, expr_ast_vec)
+
         self.obj_expr_ast = obj_expr_ast
 
     def __repr__(self):
index a33ea9245689bb2e23b6fb8c3caad65d8a3ad494..f0a0b97d37f08a5fa4ea36f29acc5f1d3f82f7ca 100644 (file)
@@ -66,7 +66,7 @@ class StateDeclAST(DeclAST):
         pairs = { "external" : "yes" }
         func = Func(self.symtab, func_id + "_" +
                     t.ident, func_id, self.location,
-                    self.symtab.find("std::string", Type), [ t ], [], [], "",
+                    self.symtab.find("std::string", Type), [ t ], [], "",
                     pairs)
         self.symtab.newSymbol(func)
 
@@ -76,6 +76,6 @@ class StateDeclAST(DeclAST):
         pairs = { "external" : "yes" }
         func = Func(self.symtab, func_id + "_" +
                     t.ident, func_id, self.location,
-                    self.symtab.find("AccessPermission", Type), [ t ], [], [], "",
+                    self.symtab.find("AccessPermission", Type), [ t ], [], "",
                     pairs)
         self.symtab.newSymbol(func)
index 07c067f680b58a30f4e7e56e97ad16dfb5b97ee1..0cbe9ea6388e49a658cace0667b14a49ef626e3e 100644 (file)
@@ -669,13 +669,15 @@ class SLICC(Grammar):
 
     def p_expr__member_method_call(self, p):
         "aexpr : aexpr DOT ident '(' exprs ')'"
-        p[0] = ast.MemberMethodCallExprAST(self, p[1],
-                    ast.FuncCallExprAST(self, p[3], p[5]))
+        p[0] = ast.MemberMethodCallExprAST(self, p[1], p[3], p[5])
+
+    def p_expr__member_method_call_lookup(self, p):
+        "aexpr : aexpr '[' exprs ']'"
+        p[0] = ast.MemberMethodCallExprAST(self, p[1], "lookup", p[3])
 
     def p_expr__class_method_call(self, p):
         "aexpr : type DOUBLE_COLON ident '(' exprs ')'"
-        p[0] = ast.ClassMethodCallExprAST(self, p[1],
-                    ast.FuncCallExprAST(self, p[3], p[5]))
+        p[0] = ast.ClassMethodCallExprAST(self, p[1], p[3], p[5])
 
     def p_expr__aexpr(self, p):
         "expr : aexpr"
index 695450b9c50f0e4138641750b64837d2033b307a..d50d0309fdaa11412509818fe592832235624c9d 100644 (file)
@@ -30,19 +30,16 @@ from slicc.symbols.Type import Type
 
 class Func(Symbol):
     def __init__(self, table, ident, name, location, return_type, param_types,
-                 proto_param_strings, body_param_strings, body,
-                 pairs, default_count = 0):
+                 param_strings, body, pairs):
         super(Func, self).__init__(table, ident, location, pairs)
         self.return_type = return_type
         self.param_types = param_types
-        self.proto_param_strings = proto_param_strings
-        self.body_param_strings = body_param_strings
+        self.param_strings = param_strings
         self.body = body
         self.isInternalMachineFunc = False
         self.c_ident = ident
         self.c_name = name
         self.class_name = ""
-        self.default_count = default_count
 
     def __repr__(self):
         return ""
@@ -60,33 +57,11 @@ class Func(Symbol):
             return_type += "*"
 
         return "%s %s(%s);" % (return_type, self.c_name,
-                               ", ".join(self.proto_param_strings))
+                               ", ".join(self.param_strings))
 
     def writeCodeFiles(self, path, includes):
         return
 
-    def checkArguments(self, args):
-        if len(args) + self.default_count < len(self.param_types) or \
-           len(args) > len(self.param_types):
-            self.error("Wrong number of arguments passed to function: '%s'" + \
-                       " Expected at least: %d, got: %d", self.c_ident,
-                       len(self.param_types) - self.default_count, len(args))
-
-        cvec = []
-        type_vec = []
-        for expr,expected_type in zip(args, self.param_types):
-            # Check the types of the parameter
-            actual_type,param_code = expr.inline(True)
-            if str(actual_type) != 'OOD' and \
-               str(actual_type) != str(expected_type) and \
-               str(actual_type["interface"]) != str(expected_type):
-                expr.error("Type mismatch: expected: %s actual: %s" % \
-                           (expected_type, actual_type))
-            cvec.append(param_code)
-            type_vec.append(expected_type)
-
-        return cvec, type_vec
-
     def generateCode(self):
         '''This write a function of object Chip'''
         if "external" in self:
@@ -95,14 +70,14 @@ class Func(Symbol):
         code = self.symtab.codeFormatter()
 
         # Generate function header
-        return_type = self.return_type.c_ident
         void_type = self.symtab.find("void", Type)
+        return_type = self.return_type.c_ident
         if "return_by_ref" in self and self.return_type != void_type:
             return_type += "&"
         if "return_by_pointer" in self and self.return_type != void_type:
             return_type += "*"
 
-        params = ', '.join(self.body_param_strings)
+        params = ', '.join(self.param_strings)
 
         code('''
 $return_type
index 3dce3c3f2124e6cf0b8304a89a1de62ca696e997..03c78c8bf0cabfab2316202a4d92971ae56a6acf 100644 (file)
@@ -320,9 +320,9 @@ class $c_ident : public AbstractController
 
     void countTransition(${ident}_State state, ${ident}_Event event);
     void possibleTransition(${ident}_State state, ${ident}_Event event);
-    uint64_t getEventCount(${ident}_Event event);
+    uint64 getEventCount(${ident}_Event event);
     bool isPossible(${ident}_State state, ${ident}_Event event);
-    uint64_t getTransitionCount(${ident}_State state, ${ident}_Event event);
+    uint64 getTransitionCount(${ident}_State state, ${ident}_Event event);
 
 private:
 ''')
@@ -802,7 +802,7 @@ $c_ident::possibleTransition(${ident}_State state,
     m_possible[state][event] = true;
 }
 
-uint64_t
+uint64
 $c_ident::getEventCount(${ident}_Event event)
 {
     return m_event_counters[event];
@@ -814,7 +814,7 @@ $c_ident::isPossible(${ident}_State state, ${ident}_Event event)
     return m_possible[state][event];
 }
 
-uint64_t
+uint64
 $c_ident::getTransitionCount(${ident}_State state,
                              ${ident}_Event event)
 {
@@ -1213,6 +1213,8 @@ TransitionResult result =
         else:
             code('doTransitionWorker(event, state, next_state, addr);')
 
+        port_to_buf_map, in_msg_bufs, msg_bufs = self.getBufferMaps(ident)
+
         code('''
 
 if (result == TransitionResult_Valid) {