ruby: message buffer, timer table: significant changes
authorNilay Vaish <nilay@cs.wisc.edu>
Wed, 16 Sep 2015 16:59:56 +0000 (11:59 -0500)
committerNilay Vaish <nilay@cs.wisc.edu>
Wed, 16 Sep 2015 16:59:56 +0000 (11:59 -0500)
This patch changes MessageBuffer and TimerTable, two structures that components
in Ruby use to buffer messages.  These structures no longer maintain pointers to
clock objects.  Their functions now take the current time, in Ticks, as an
input.  Similarly, these structures no longer operate on Cycle-valued latencies
for their different operations; the components invoking the relevant functions
must now supply those latencies, also in Ticks.
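
Grounded in the SLICC changes below, the following is a minimal, self-contained
C++ sketch of the new calling convention, not gem5's actual classes:
TickMessageBuffer and its members are hypothetical stand-ins for MessageBuffer.
The buffer knows nothing about any clock; the caller passes the current time
and all latencies as Ticks.

#include <cassert>
#include <cstdint>
#include <deque>
#include <iostream>

using Tick = uint64_t;

struct Msg { Tick ready_at; Tick enqueued_at; };

class TickMessageBuffer {          // hypothetical stand-in for MessageBuffer
  public:
    // The enqueue latency arrives already converted to Ticks by the caller.
    void enqueue(Tick now, Tick delta)
    { m_msgs.push_back({now + delta, now}); }

    // Was isReady() with an internal clock pointer; now the caller supplies
    // the current tick (e.g. its own clockEdge()).
    bool isReady(Tick now) const
    { return !m_msgs.empty() && m_msgs.front().ready_at <= now; }

    // Returns the queueing delay in Ticks, which the caller may convert back
    // to Cycles for profiling, as the protocol files below do.
    Tick dequeue(Tick now)
    {
        assert(isReady(now));
        Tick delay = now - m_msgs.front().enqueued_at;
        m_msgs.pop_front();
        return delay;
    }

  private:
    std::deque<Msg> m_msgs;
};

int main()
{
    TickMessageBuffer buf;
    Tick now = 1000;               // caller-supplied clock edge
    buf.enqueue(now, 500);         // latency already in Ticks
    now = 2000;
    if (buf.isReady(now))
        std::cout << "delay = " << buf.dequeue(now) << " ticks\n";  // 1000
    return 0;
}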

I felt the need for these changes while trying to speed up Ruby.  The ultimate
aim is to eliminate the Consumer class and replace it with an EventManager
object in the MessageBuffer and TimerTable classes.  This object would be used
for scheduling events; the event itself would carry the object and the function
to be invoked.
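
As a rough sketch of that direction (assuming a std::function-based callback;
none of these names are gem5's), an event that already binds the object and the
member function to invoke removes the need for a consumer pointer inside the
buffer:

#include <cstdint>
#include <functional>
#include <iostream>
#include <map>

using Tick = uint64_t;

class EventScheduler {                       // hypothetical EventManager stand-in
  public:
    void schedule(Tick when, std::function<void()> cb)
    { m_events.emplace(when, std::move(cb)); }

    // Fire every event whose scheduled time has been reached.
    void runUntil(Tick t)
    {
        while (!m_events.empty() && m_events.begin()->first <= t) {
            auto it = m_events.begin();
            it->second();                    // invoke the bound object+function
            m_events.erase(it);
        }
    }

  private:
    std::multimap<Tick, std::function<void()>> m_events;
};

struct Controller {
    void wakeup() { std::cout << "controller woken up\n"; }
};

int main()
{
    EventScheduler sched;
    Controller ctrl;
    // The event carries the object and the function to call, so the buffer
    // itself needs no Consumer pointer.
    sched.schedule(2000, [&ctrl] { ctrl.wakeup(); });
    sched.runUntil(2500);
    return 0;
}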

In hindsight, I should have done this while moving the memory system away from
a single global clock.  That change introduced the clock objects that replaced
the global clock object, and it never crossed my mind that holding pointers to
clock objects is poor design.  I also dislike the fact that message buffers
keep separate consumer, receiver, and sender pointers.

46 files changed:
src/mem/protocol/MESI_Three_Level-L0cache.sm
src/mem/protocol/MESI_Three_Level-L1cache.sm
src/mem/protocol/MESI_Two_Level-L1cache.sm
src/mem/protocol/MESI_Two_Level-L2cache.sm
src/mem/protocol/MESI_Two_Level-dir.sm
src/mem/protocol/MESI_Two_Level-dma.sm
src/mem/protocol/MI_example-cache.sm
src/mem/protocol/MI_example-dir.sm
src/mem/protocol/MI_example-dma.sm
src/mem/protocol/MOESI_CMP_directory-L1cache.sm
src/mem/protocol/MOESI_CMP_directory-L2cache.sm
src/mem/protocol/MOESI_CMP_directory-dir.sm
src/mem/protocol/MOESI_CMP_directory-dma.sm
src/mem/protocol/MOESI_CMP_token-L1cache.sm
src/mem/protocol/MOESI_CMP_token-L2cache.sm
src/mem/protocol/MOESI_CMP_token-dir.sm
src/mem/protocol/MOESI_CMP_token-dma.sm
src/mem/protocol/MOESI_hammer-cache.sm
src/mem/protocol/MOESI_hammer-dir.sm
src/mem/protocol/MOESI_hammer-dma.sm
src/mem/protocol/Network_test-cache.sm
src/mem/protocol/Network_test-dir.sm
src/mem/protocol/RubySlicc_Defines.sm
src/mem/protocol/RubySlicc_Exports.sm
src/mem/protocol/RubySlicc_Types.sm
src/mem/ruby/network/MessageBuffer.cc
src/mem/ruby/network/MessageBuffer.hh
src/mem/ruby/network/MessageBuffer.py
src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
src/mem/ruby/network/simple/PerfectSwitch.cc
src/mem/ruby/network/simple/SimpleNetwork.py
src/mem/ruby/network/simple/Switch.cc
src/mem/ruby/network/simple/Throttle.cc
src/mem/ruby/slicc_interface/AbstractController.cc
src/mem/ruby/structures/TBETable.hh
src/mem/ruby/structures/TimerTable.cc
src/mem/ruby/structures/TimerTable.hh
src/mem/ruby/system/DMASequencer.cc
src/mem/ruby/system/RubyPort.cc
src/mem/ruby/system/Sequencer.cc
src/mem/slicc/ast/EnqueueStatementAST.py
src/mem/slicc/ast/ObjDeclAST.py
src/mem/slicc/ast/PeekStatementAST.py
src/mem/slicc/ast/StallAndWaitStatementAST.py
src/mem/slicc/symbols/StateMachine.py

index 7e8626dc9dd0f9f4df7ff009658f058af315e01a..3f22a4906a7b302c04062661f0f7a2a643b455b0 100644 (file)
@@ -135,6 +135,8 @@ machine(L0Cache, "MESI Directory L0 Cache")
 
   TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";
 
+  Tick clockEdge();
+  Cycles ticksToCycles(Tick t);
   void set_cache_entry(AbstractCacheEntry a);
   void unset_cache_entry();
   void set_tbe(TBE a);
@@ -255,7 +257,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
 
   // Messages for this L0 cache from the L1 cache
   in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
-    if (messgeBuffer_in.isReady()) {
+    if (messgeBuffer_in.isReady(clockEdge())) {
       peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
         assert(in_msg.Dest == machineID);
 
@@ -289,7 +291,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
 
   // Mandatory Queue betweens Node's CPU and it's L0 caches
   in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
-    if (mandatoryQueue_in.isReady()) {
+    if (mandatoryQueue_in.isReady(clockEdge())) {
       peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -513,17 +515,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
   }
 
   action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
-    mandatoryQueue_in.dequeue();
+    mandatoryQueue_in.dequeue(clockEdge());
   }
 
   action(l_popRequestQueue, "l",
          desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, messgeBuffer_in.dequeue());
+    Tick delay := messgeBuffer_in.dequeue(clockEdge());
+    profileMsgDelay(2, ticksToCycles(delay));
   }
 
   action(o_popIncomingResponseQueue, "o",
          desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, messgeBuffer_in.dequeue());
+    Tick delay := messgeBuffer_in.dequeue(clockEdge());
+    profileMsgDelay(1, ticksToCycles(delay));
   }
 
   action(s_deallocateTBE, "s", desc="Deallocate TBE") {
index 6c8df8d754d7879a9a656d78f49ab7eccc130bdd..0eb9a43b53121d697d3d30b7f2310e98b674f008 100644 (file)
@@ -151,6 +151,8 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
 
+  Tick clockEdge();
+  Cycles ticksToCycles(Tick t);
   void set_cache_entry(AbstractCacheEntry a);
   void unset_cache_entry();
   void set_tbe(TBE a);
@@ -266,7 +268,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   // Response From the L2 Cache to this L1 cache
   in_port(responseNetwork_in, ResponseMsg, responseFromL2, rank = 3) {
-    if (responseNetwork_in.isReady()) {
+    if (responseNetwork_in.isReady(clockEdge())) {
       peek(responseNetwork_in, ResponseMsg) {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -303,7 +305,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   // Request to this L1 cache from the shared L2
   in_port(requestNetwork_in, RequestMsg, requestFromL2, rank = 2) {
-    if(requestNetwork_in.isReady()) {
+    if(requestNetwork_in.isReady(clockEdge())) {
       peek(requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
         Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -340,7 +342,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   // Requests to this L1 cache from the L0 cache.
   in_port(messageBufferFromL0_in, CoherenceMsg, bufferFromL0, rank = 0) {
-    if (messageBufferFromL0_in.isReady()) {
+    if (messageBufferFromL0_in.isReady(clockEdge())) {
       peek(messageBufferFromL0_in, CoherenceMsg) {
         Entry cache_entry := getCacheEntry(in_msg.addr);
         TBE tbe := TBEs[in_msg.addr];
@@ -634,17 +636,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   }
 
   action(k_popL0RequestQueue, "k", desc="Pop mandatory queue.") {
-    messageBufferFromL0_in.dequeue();
+    messageBufferFromL0_in.dequeue(clockEdge());
   }
 
   action(l_popL2RequestQueue, "l",
          desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, requestNetwork_in.dequeue());
+    Tick delay := requestNetwork_in.dequeue(clockEdge());
+    profileMsgDelay(2, ticksToCycles(delay));
   }
 
   action(o_popL2ResponseQueue, "o",
          desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, responseNetwork_in.dequeue());
+    Tick delay := responseNetwork_in.dequeue(clockEdge());
+    profileMsgDelay(1, ticksToCycles(delay));
   }
 
   action(s_deallocateTBE, "s", desc="Deallocate TBE") {
index b9be4663fcad87a20664324a0328b2daa7b0cd37..c40a47caee8ec13a5089be37b1e0edfb4aac325f 100644 (file)
@@ -156,6 +156,8 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
 
+  Tick clockEdge();
+  Cycles ticksToCycles(Tick t);
   void set_cache_entry(AbstractCacheEntry a);
   void unset_cache_entry();
   void set_tbe(TBE a);
@@ -296,7 +298,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   // searches of all entries in the queue, not just the head msg. All
   // msgs in the structure can be invalidated if a demand miss matches.
   in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
-      if (optionalQueue_in.isReady()) {
+      if (optionalQueue_in.isReady(clockEdge())) {
           peek(optionalQueue_in, RubyRequest) {
               // Instruction Prefetch
               if (in_msg.Type == RubyRequestType:IFETCH) {
@@ -373,7 +375,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   // Response  L1 Network - response msg to this L1 cache
   in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
-    if (responseL1Network_in.isReady()) {
+    if (responseL1Network_in.isReady(clockEdge())) {
       peek(responseL1Network_in, ResponseMsg, block_on="addr") {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -413,7 +415,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   // Request InterChip network - request from this L1 cache to the shared L2
   in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
-    if(requestL1Network_in.isReady()) {
+    if(requestL1Network_in.isReady(clockEdge())) {
       peek(requestL1Network_in, RequestMsg, block_on="addr") {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -439,7 +441,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   // Mandatory Queue betweens Node's CPU and it's L1 caches
   in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
-    if (mandatoryQueue_in.isReady()) {
+    if (mandatoryQueue_in.isReady(clockEdge())) {
       peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -866,17 +868,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   }
 
   action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
-    mandatoryQueue_in.dequeue();
+    mandatoryQueue_in.dequeue(clockEdge());
   }
 
   action(l_popRequestQueue, "l",
     desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, requestL1Network_in.dequeue());
+    Tick delay := requestL1Network_in.dequeue(clockEdge());
+    profileMsgDelay(2, ticksToCycles(delay));
   }
 
   action(o_popIncomingResponseQueue, "o",
     desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, responseL1Network_in.dequeue());
+    Tick delay := responseL1Network_in.dequeue(clockEdge());
+    profileMsgDelay(1, ticksToCycles(delay));
   }
 
   action(s_deallocateTBE, "s", desc="Deallocate TBE") {
@@ -963,7 +967,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   }
 
   action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
-      optionalQueue_in.dequeue();
+      optionalQueue_in.dequeue(clockEdge());
   }
 
   action(mp_markPrefetched, "mp", desc="Write data from response queue to cache") {
index e4f719d9f993779a0deb4ade3b6a58e9e4f093d8..4134b796497a006e8e7776f4fb7b4056d7529089 100644 (file)
@@ -148,6 +148,10 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
 
   TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
 
+  Tick clockEdge();
+  Tick cyclesToTicks(Cycles c);
+  Cycles ticksToCycles(Tick t);
+
   void set_cache_entry(AbstractCacheEntry a);
   void unset_cache_entry();
   void set_tbe(TBE a);
@@ -285,7 +289,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
 
 
   in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
-    if(L1unblockNetwork_in.isReady()) {
+    if(L1unblockNetwork_in.isReady(clockEdge())) {
       peek(L1unblockNetwork_in,  ResponseMsg) {
         Entry cache_entry := getCacheEntry(in_msg.addr);
         TBE tbe := TBEs[in_msg.addr];
@@ -307,7 +311,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
 
   // Response  L2 Network - response msg to this particular L2 bank
   in_port(responseL2Network_in, ResponseMsg, responseToL2Cache, rank = 1) {
-    if (responseL2Network_in.isReady()) {
+    if (responseL2Network_in.isReady(clockEdge())) {
       peek(responseL2Network_in, ResponseMsg) {
         // test wether it's from a local L1 or an off chip source
         assert(in_msg.Destination.isElement(machineID));
@@ -348,7 +352,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
 
   // L1 Request
   in_port(L1RequestL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
-    if(L1RequestL2Network_in.isReady()) {
+    if(L1RequestL2Network_in.isReady(clockEdge())) {
       peek(L1RequestL2Network_in,  RequestMsg) {
         Entry cache_entry := getCacheEntry(in_msg.addr);
         TBE tbe := TBEs[in_msg.addr];
@@ -604,15 +608,18 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
   }
 
   action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
-    profileMsgDelay(0, L1RequestL2Network_in.dequeue());
+    Tick delay := L1RequestL2Network_in.dequeue(clockEdge());
+    profileMsgDelay(0, ticksToCycles(delay));
   }
 
   action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
-    profileMsgDelay(0, L1unblockNetwork_in.dequeue());
+    Tick delay := L1unblockNetwork_in.dequeue(clockEdge());
+    profileMsgDelay(0, ticksToCycles(delay));
   }
 
   action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
-    profileMsgDelay(1, responseL2Network_in.dequeue());
+    Tick delay := responseL2Network_in.dequeue(clockEdge());
+    profileMsgDelay(1, ticksToCycles(delay));
   }
 
   action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
@@ -769,7 +776,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
   }
 
   action(zn_recycleResponseNetwork, "zn", desc="recycle memory request") {
-    responseL2Network_in.recycle();
+    responseL2Network_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
index 7484d001c594d8c070e1e8a2bf595fd4d3db9c0c..c9fbe38751e8d81b01b47cd2bb8ad252cc69adb0 100644 (file)
@@ -98,6 +98,8 @@ machine(Directory, "MESI Two Level directory protocol")
   // ** OBJECTS **
   TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
 
+  Tick clockEdge();
+  Tick cyclesToTicks(Cycles c);
   void set_tbe(TBE tbe);
   void unset_tbe();
   void wakeUpBuffers(Addr a);
@@ -190,7 +192,7 @@ machine(Directory, "MESI Two Level directory protocol")
   // ** IN_PORTS **
 
   in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
-    if (requestNetwork_in.isReady()) {
+    if (requestNetwork_in.isReady(clockEdge())) {
       peek(requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (isGETRequest(in_msg.Type)) {
@@ -210,7 +212,7 @@ machine(Directory, "MESI Two Level directory protocol")
   }
 
   in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
-    if (responseNetwork_in.isReady()) {
+    if (responseNetwork_in.isReady(clockEdge())) {
       peek(responseNetwork_in, ResponseMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
@@ -227,7 +229,7 @@ machine(Directory, "MESI Two Level directory protocol")
 
   // off-chip memory request/response is done
   in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
-    if (memQueue_in.isReady()) {
+    if (memQueue_in.isReady(clockEdge())) {
       peek(memQueue_in, MemoryMsg) {
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
           trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -286,15 +288,15 @@ machine(Directory, "MESI Two Level directory protocol")
   }
 
   action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
-    requestNetwork_in.dequeue();
+    requestNetwork_in.dequeue(clockEdge());
   }
 
   action(k_popIncomingResponseQueue, "k", desc="Pop incoming request queue") {
-    responseNetwork_in.dequeue();
+    responseNetwork_in.dequeue(clockEdge());
   }
 
   action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
-    memQueue_in.dequeue();
+    memQueue_in.dequeue(clockEdge());
   }
 
   action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
@@ -322,7 +324,7 @@ machine(Directory, "MESI Two Level directory protocol")
   }
 
   action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
-    requestNetwork_in.dequeue();
+    requestNetwork_in.dequeue(clockEdge());
   }
 
   action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
@@ -359,7 +361,7 @@ machine(Directory, "MESI Two Level directory protocol")
   }
 
   action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
-    requestNetwork_in.recycle();
+    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
index cbd32cd44fefa141cf317a75b8bd0eda892b78f0..84774ede8ad77626db46ac28201f2625ecdb82d6 100644 (file)
@@ -51,6 +51,7 @@ machine(DMA, "DMA Controller")
   }
 
   State cur_state;
+  Tick clockEdge();
 
   State getState(Addr addr) {
     return cur_state;
@@ -78,7 +79,7 @@ machine(DMA, "DMA Controller")
   out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
 
   in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
-    if (dmaRequestQueue_in.isReady()) {
+    if (dmaRequestQueue_in.isReady(clockEdge())) {
       peek(dmaRequestQueue_in, SequencerMsg) {
         if (in_msg.Type == SequencerRequestType:LD ) {
           trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -92,7 +93,7 @@ machine(DMA, "DMA Controller")
   }
 
   in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
-    if (dmaResponseQueue_in.isReady()) {
+    if (dmaResponseQueue_in.isReady(clockEdge())) {
       peek( dmaResponseQueue_in, ResponseMsg) {
         if (in_msg.Type == CoherenceResponseType:ACK) {
           trigger(Event:Ack, makeLineAddress(in_msg.addr));
@@ -142,11 +143,11 @@ machine(DMA, "DMA Controller")
   }
 
   action(p_popRequestQueue, "p", desc="Pop request queue") {
-    dmaRequestQueue_in.dequeue();
+    dmaRequestQueue_in.dequeue(clockEdge());
   }
 
   action(p_popResponseQueue, "\p", desc="Pop request queue") {
-    dmaResponseQueue_in.dequeue();
+    dmaResponseQueue_in.dequeue(clockEdge());
   }
 
   transition(READY, ReadRequest, BUSY_RD) {
index 3341066150602248f7a2b92e5f84f12ab3e4016d..0a15704942fe75f19e6447824ffc610f46b31f0e 100644 (file)
@@ -103,6 +103,8 @@ machine(L1Cache, "MI Example L1 Cache")
   TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
 
   // PROTOTYPES
+  Tick clockEdge();
+  Cycles ticksToCycles(Tick t);
   void set_cache_entry(AbstractCacheEntry a);
   void unset_cache_entry();
   void set_tbe(TBE b);
@@ -200,7 +202,7 @@ machine(L1Cache, "MI Example L1 Cache")
   out_port(responseNetwork_out, ResponseMsg, responseFromCache);
 
   in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
-    if (forwardRequestNetwork_in.isReady()) {
+    if (forwardRequestNetwork_in.isReady(clockEdge())) {
       peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -226,7 +228,7 @@ machine(L1Cache, "MI Example L1 Cache")
   }
 
   in_port(responseNetwork_in, ResponseMsg, responseToCache) {
-    if (responseNetwork_in.isReady()) {
+    if (responseNetwork_in.isReady(clockEdge())) {
       peek(responseNetwork_in, ResponseMsg, block_on="addr") {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -244,7 +246,7 @@ machine(L1Cache, "MI Example L1 Cache")
 
     // Mandatory Queue
   in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
-    if (mandatoryQueue_in.isReady()) {
+    if (mandatoryQueue_in.isReady(clockEdge())) {
       peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         Entry cache_entry := getCacheEntry(in_msg.LineAddress);
@@ -330,15 +332,17 @@ machine(L1Cache, "MI Example L1 Cache")
   }
 
   action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
-    mandatoryQueue_in.dequeue();
+    mandatoryQueue_in.dequeue(clockEdge());
   }
 
   action(n_popResponseQueue, "n", desc="Pop the response queue") {
-    profileMsgDelay(1, responseNetwork_in.dequeue());
+    Tick delay := responseNetwork_in.dequeue(clockEdge());
+    profileMsgDelay(1, ticksToCycles(delay));
   }
 
   action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
-    profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
+    Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
+    profileMsgDelay(2, ticksToCycles(delay));
   }
 
   action(p_profileMiss, "pi", desc="Profile cache miss") {
index bb437390172da76f36fe7992577239cd8de56483..f12e474b0d938e14d5ffebf31f1d275912710e4b 100644 (file)
@@ -108,6 +108,9 @@ machine(Directory, "Directory protocol")
   // ** OBJECTS **
   TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
 
+  Tick clockEdge();
+  Cycles ticksToCycles(Tick t);
+  Tick cyclesToTicks(Cycles c);
   void set_tbe(TBE b);
   void unset_tbe();
 
@@ -204,7 +207,7 @@ machine(Directory, "Directory protocol")
 
   // ** IN_PORTS **
   in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
-    if (dmaRequestQueue_in.isReady()) {
+    if (dmaRequestQueue_in.isReady(clockEdge())) {
       peek(dmaRequestQueue_in, DMARequestMsg) {
         TBE tbe := TBEs[in_msg.LineAddress];
         if (in_msg.Type == DMARequestType:READ) {
@@ -219,7 +222,7 @@ machine(Directory, "Directory protocol")
   }
 
   in_port(requestQueue_in, RequestMsg, requestToDir) {
-    if (requestQueue_in.isReady()) {
+    if (requestQueue_in.isReady(clockEdge())) {
       peek(requestQueue_in, RequestMsg) {
         TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == CoherenceRequestType:GETS) {
@@ -242,7 +245,7 @@ machine(Directory, "Directory protocol")
 //added by SS
   // off-chip memory request/response is done
   in_port(memQueue_in, MemoryMsg, responseFromMemory) {
-    if (memQueue_in.isReady()) {
+    if (memQueue_in.isReady(clockEdge())) {
       peek(memQueue_in, MemoryMsg) {
         TBE tbe := TBEs[in_msg.addr];
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
@@ -392,11 +395,11 @@ machine(Directory, "Directory protocol")
   }
 
   action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
-    requestQueue_in.dequeue();
+    requestQueue_in.dequeue(clockEdge());
   }
 
   action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
-    dmaRequestQueue_in.dequeue();
+    dmaRequestQueue_in.dequeue(clockEdge());
   }
   
   action(v_allocateTBE, "v", desc="Allocate TBE") {
@@ -432,11 +435,11 @@ machine(Directory, "Directory protocol")
   }
 
   action(z_recycleRequestQueue, "z", desc="recycle request queue") {
-    requestQueue_in.recycle();
+    requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
-    dmaRequestQueue_in.recycle();
+    dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
 
@@ -476,7 +479,7 @@ machine(Directory, "Directory protocol")
   }
 
   action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
-    memQueue_in.dequeue();
+    memQueue_in.dequeue(clockEdge());
   }
 
   // TRANSITIONS
index ce7b446305ab439fcdbecbba34516c75cdeb1fde..76d87516ad1f8faa201faaffa81a2ff8160a9228 100644 (file)
@@ -52,6 +52,9 @@ machine(DMA, "DMA Controller")
 
   State cur_state;
 
+  Tick clockEdge();
+  Cycles ticksToCycles(Tick t);
+
   State getState(Addr addr) {
     return cur_state;
   }
@@ -78,7 +81,7 @@ machine(DMA, "DMA Controller")
   out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
 
   in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
-    if (dmaRequestQueue_in.isReady()) {
+    if (dmaRequestQueue_in.isReady(clockEdge())) {
       peek(dmaRequestQueue_in, SequencerMsg) {
         if (in_msg.Type == SequencerRequestType:LD ) {
           trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -92,7 +95,7 @@ machine(DMA, "DMA Controller")
   }
 
   in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
-    if (dmaResponseQueue_in.isReady()) {
+    if (dmaResponseQueue_in.isReady(clockEdge())) {
       peek( dmaResponseQueue_in, DMAResponseMsg) {
         if (in_msg.Type == DMAResponseType:ACK) {
           trigger(Event:Ack, in_msg.LineAddress);
@@ -148,11 +151,11 @@ machine(DMA, "DMA Controller")
   }
 
   action(p_popRequestQueue, "p", desc="Pop request queue") {
-    dmaRequestQueue_in.dequeue();
+    dmaRequestQueue_in.dequeue(clockEdge());
   }
 
   action(p_popResponseQueue, "\p", desc="Pop request queue") {
-    dmaResponseQueue_in.dequeue();
+    dmaResponseQueue_in.dequeue(clockEdge());
   }
 
   transition(READY, ReadRequest, BUSY_RD) {
index 2ef80efd2d4d794c040db4799c33542701264e28..1b1fd4ac7d155256f939f4845ae4fc9392dd8540 100644 (file)
@@ -133,6 +133,8 @@ machine(L1Cache, "Directory protocol")
     bool isPresent(Addr);
   }
 
+  Tick clockEdge();
+  Tick cyclesToTicks(Cycles c);
   void set_cache_entry(AbstractCacheEntry b);
   void unset_cache_entry();
   void set_tbe(TBE b);
@@ -266,16 +268,16 @@ machine(L1Cache, "Directory protocol")
 
   // Use Timer
   in_port(useTimerTable_in, Addr, useTimerTable) {
-    if (useTimerTable_in.isReady()) {
-        trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
-                getCacheEntry(useTimerTable.readyAddress()),
-                TBEs[useTimerTable.readyAddress()]);
+    if (useTimerTable_in.isReady(clockEdge())) {
+        Addr readyAddress := useTimerTable.nextAddress();
+        trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
+                TBEs.lookup(readyAddress));
     }
   }
 
   // Trigger Queue
   in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
-    if (triggerQueue_in.isReady()) {
+    if (triggerQueue_in.isReady(clockEdge())) {
       peek(triggerQueue_in, TriggerMsg) {
         if (in_msg.Type == TriggerType:ALL_ACKS) {
           trigger(Event:All_acks, in_msg.addr,
@@ -291,7 +293,7 @@ machine(L1Cache, "Directory protocol")
 
   // Request Network
   in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
-    if (requestNetwork_in.isReady()) {
+    if (requestNetwork_in.isReady(clockEdge())) {
       peek(requestNetwork_in, RequestMsg, block_on="addr") {
         assert(in_msg.Destination.isElement(machineID));
         DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
@@ -331,7 +333,7 @@ machine(L1Cache, "Directory protocol")
 
   // Response Network
   in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
-    if (responseToL1Cache_in.isReady()) {
+    if (responseToL1Cache_in.isReady(clockEdge())) {
       peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
         if (in_msg.Type == CoherenceResponseType:ACK) {
           trigger(Event:Ack, in_msg.addr,
@@ -352,7 +354,7 @@ machine(L1Cache, "Directory protocol")
   // Nothing from the unblock network
   // Mandatory Queue betweens Node's CPU and it's L1 caches
   in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
-    if (mandatoryQueue_in.isReady()) {
+    if (mandatoryQueue_in.isReady(clockEdge())) {
       peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -684,7 +686,7 @@ machine(L1Cache, "Directory protocol")
   }
 
   action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
-    triggerQueue_in.dequeue();
+    triggerQueue_in.dequeue(clockEdge());
   }
 
   action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
@@ -692,11 +694,11 @@ machine(L1Cache, "Directory protocol")
   }
 
   action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
-    mandatoryQueue_in.dequeue();
+    mandatoryQueue_in.dequeue(clockEdge());
   }
 
   action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
-    requestNetwork_in.dequeue();
+    requestNetwork_in.dequeue(clockEdge());
   }
 
   action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
@@ -715,7 +717,7 @@ machine(L1Cache, "Directory protocol")
   }
 
   action(n_popResponseQueue, "n", desc="Pop response queue") {
-    responseToL1Cache_in.dequeue();
+    responseToL1Cache_in.dequeue(clockEdge());
   }
 
   action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
@@ -729,7 +731,8 @@ machine(L1Cache, "Directory protocol")
   }
 
   action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
-    useTimerTable.set(address, use_timeout_latency);
+    useTimerTable.set(address,
+                      clockEdge() + cyclesToTicks(use_timeout_latency));
   }
 
   action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
@@ -908,11 +911,11 @@ machine(L1Cache, "Directory protocol")
   }
 
   action(z_recycleRequestQueue, "z", desc="Send the head of the mandatory queue to the back of the queue.") {
-    requestNetwork_in.recycle();
+    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
-    mandatoryQueue_in.recycle();
+    mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   //*****************************************************
index 0b288709ef9218d456eae8903c30a651c46bf9c9..84fb276e3d268eb81fd968824a8f17acb7efe1cb 100644 (file)
@@ -227,6 +227,8 @@ machine(L2Cache, "Token protocol")
   TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
   PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
 
+  Tick clockEdge();
+  Tick cyclesToTicks(Cycles c);
   void set_cache_entry(AbstractCacheEntry b);
   void unset_cache_entry();
   void set_tbe(TBE b);
@@ -577,7 +579,7 @@ machine(L2Cache, "Token protocol")
 
   // Trigger Queue
   in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
-    if (triggerQueue_in.isReady()) {
+    if (triggerQueue_in.isReady(clockEdge())) {
       peek(triggerQueue_in, TriggerMsg) {
         if (in_msg.Type == TriggerType:ALL_ACKS) {
           trigger(Event:All_Acks, in_msg.addr,
@@ -592,7 +594,7 @@ machine(L2Cache, "Token protocol")
 
   // Request Network
   in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
-    if (requestNetwork_in.isReady()) {
+    if (requestNetwork_in.isReady(clockEdge())) {
       peek(requestNetwork_in, RequestMsg) {
         if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
           if (in_msg.Requestor == machineID) {
@@ -625,7 +627,7 @@ machine(L2Cache, "Token protocol")
   }
 
   in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
-    if (L1requestNetwork_in.isReady()) {
+    if (L1requestNetwork_in.isReady(clockEdge())) {
       peek(L1requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (in_msg.Type == CoherenceRequestType:GETX) {
@@ -660,7 +662,7 @@ machine(L2Cache, "Token protocol")
 
   // Response Network
   in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
-    if (responseNetwork_in.isReady()) {
+    if (responseNetwork_in.isReady(clockEdge())) {
       peek(responseNetwork_in, ResponseMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (in_msg.Type == CoherenceResponseType:ACK) {
@@ -1366,7 +1368,7 @@ machine(L2Cache, "Token protocol")
   }
 
   action(m_popRequestQueue, "m", desc="Pop request queue.") {
-    requestNetwork_in.dequeue();
+    requestNetwork_in.dequeue(clockEdge());
   }
 
   action(m_decrementNumberOfMessagesInt, "\m", desc="Decrement the number of messages for which we're waiting") {
@@ -1391,15 +1393,15 @@ machine(L2Cache, "Token protocol")
   }
 
   action(n_popResponseQueue, "n", desc="Pop response queue") {
-    responseNetwork_in.dequeue();
+    responseNetwork_in.dequeue(clockEdge());
   }
 
   action(n_popTriggerQueue, "\n", desc="Pop trigger queue.") {
-    triggerQueue_in.dequeue();
+    triggerQueue_in.dequeue(clockEdge());
   }
 
   action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
-    L1requestNetwork_in.dequeue();
+    L1requestNetwork_in.dequeue(clockEdge());
   }
 
 
@@ -1538,21 +1540,21 @@ machine(L2Cache, "Token protocol")
     peek(L1requestNetwork_in, RequestMsg) {
       APPEND_TRANSITION_COMMENT(in_msg.Requestor);
     }
-    L1requestNetwork_in.recycle();
+    L1requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(zz_recycleRequestQueue, "\zz", desc="Send the head of the mandatory queue to the back of the queue.") {
     peek(requestNetwork_in, RequestMsg) {
       APPEND_TRANSITION_COMMENT(in_msg.Requestor);
     }
-    requestNetwork_in.recycle();
+    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(zz_recycleResponseQueue, "\z\z", desc="Send the head of the mandatory queue to the back of the queue.") {
     peek(responseNetwork_in, ResponseMsg) {
       APPEND_TRANSITION_COMMENT(in_msg.Sender);
     }
-    responseNetwork_in.recycle();
+    responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(da_sendDmaAckUnblock, "da", desc="Send dma ack to global directory") {
index 6ee7cd2605db0705eb022bd716219e851a608997..7175edc8d08bd89618e4e0d3b0b005973c4fd2fa 100644 (file)
@@ -119,6 +119,8 @@ machine(Directory, "Directory protocol")
   // ** OBJECTS **
   TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
 
+  Tick clockEdge();
+  Tick cyclesToTicks(Cycles c);
   void set_tbe(TBE b);
   void unset_tbe();
 
@@ -228,7 +230,7 @@ machine(Directory, "Directory protocol")
   // ** IN_PORTS **
 
   in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
-    if (unblockNetwork_in.isReady()) {
+    if (unblockNetwork_in.isReady(clockEdge())) {
       peek(unblockNetwork_in, ResponseMsg) {
         if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
           if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
@@ -261,7 +263,7 @@ machine(Directory, "Directory protocol")
   }
 
   in_port(requestQueue_in, RequestMsg, requestToDir) {
-    if (requestQueue_in.isReady()) {
+    if (requestQueue_in.isReady(clockEdge())) {
       peek(requestQueue_in, RequestMsg) {
         if (in_msg.Type == CoherenceRequestType:GETS) {
           trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
@@ -288,7 +290,7 @@ machine(Directory, "Directory protocol")
 
   // off-chip memory request/response is done
   in_port(memQueue_in, MemoryMsg, responseFromMemory) {
-    if (memQueue_in.isReady()) {
+    if (memQueue_in.isReady(clockEdge())) {
       peek(memQueue_in, MemoryMsg) {
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
           trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -438,11 +440,11 @@ machine(Directory, "Directory protocol")
   }
 
   action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
-    requestQueue_in.dequeue();
+    requestQueue_in.dequeue(clockEdge());
   }
 
   action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
-    unblockNetwork_in.dequeue();
+    unblockNetwork_in.dequeue(clockEdge());
   }
 
   action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
@@ -461,7 +463,7 @@ machine(Directory, "Directory protocol")
   }
 
   action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
-    memQueue_in.dequeue();
+    memQueue_in.dequeue(clockEdge());
   }
 
   action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
@@ -501,7 +503,7 @@ machine(Directory, "Directory protocol")
   }
 
   action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
-    requestQueue_in.recycle();
+    requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
index 10fc94abe486878b09f329a2b61dd2474b56a8c3..72dec64662dec53215d0f10f1b9b44ea7a805f6a 100644 (file)
@@ -74,6 +74,7 @@ machine(DMA, "DMA Controller")
   TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
   State cur_state;
 
+  Tick clockEdge();
   void set_tbe(TBE b);
   void unset_tbe();
 
@@ -104,7 +105,7 @@ machine(DMA, "DMA Controller")
   out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
 
   in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
-    if (dmaRequestQueue_in.isReady()) {
+    if (dmaRequestQueue_in.isReady(clockEdge())) {
       peek(dmaRequestQueue_in, SequencerMsg) {
         if (in_msg.Type == SequencerRequestType:LD ) {
           trigger(Event:ReadRequest, in_msg.LineAddress,
@@ -120,7 +121,7 @@ machine(DMA, "DMA Controller")
   }
 
   in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
-    if (dmaResponseQueue_in.isReady()) {
+    if (dmaResponseQueue_in.isReady(clockEdge())) {
       peek( dmaResponseQueue_in, ResponseMsg) {
         if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
           trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
@@ -141,7 +142,7 @@ machine(DMA, "DMA Controller")
 
   // Trigger Queue
   in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
-    if (triggerQueue_in.isReady()) {
+    if (triggerQueue_in.isReady(clockEdge())) {
       peek(triggerQueue_in, TriggerMsg) {
         if (in_msg.Type == TriggerType:ALL_ACKS) {
           trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
@@ -215,15 +216,15 @@ machine(DMA, "DMA Controller")
   }
 
   action(p_popRequestQueue, "p", desc="Pop request queue") {
-    dmaRequestQueue_in.dequeue();
+    dmaRequestQueue_in.dequeue(clockEdge());
   }
 
   action(p_popResponseQueue, "\p", desc="Pop request queue") {
-    dmaResponseQueue_in.dequeue();
+    dmaResponseQueue_in.dequeue(clockEdge());
   }
 
   action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
-    triggerQueue_in.dequeue();
+    triggerQueue_in.dequeue(clockEdge());
   }
 
   action(t_updateTBEData, "t", desc="Update TBE Data") {
index 230adfc4b59141f86e3d15e40c85c1f084adf94b..dac2027b9596ddf2ecab79332f3e4898527706fe 100644 (file)
@@ -184,6 +184,8 @@ machine(L1Cache, "Token protocol")
     int countReadStarvingForAddress(Addr);
   }
 
+  Tick clockEdge();
+  Tick cyclesToTicks(Cycles c);
   void set_cache_entry(AbstractCacheEntry b);
   void unset_cache_entry();
   void set_tbe(TBE b);
@@ -456,25 +458,26 @@ machine(L1Cache, "Token protocol")
 
   // Use Timer
   in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
-    if (useTimerTable_in.isReady()) {
-      TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
-
-      if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
-          (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
-        if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
-          trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress(),
-                  getCacheEntry(useTimerTable.readyAddress()), tbe);
+    if (useTimerTable_in.isReady(clockEdge())) {
+      Addr readyAddress := useTimerTable.nextAddress();
+      TBE tbe := L1_TBEs.lookup(readyAddress);
+
+      if (persistentTable.isLocked(readyAddress) &&
+          (persistentTable.findSmallest(readyAddress) != machineID)) {
+        if (persistentTable.typeOfSmallest(readyAddress) == AccessType:Write) {
+          trigger(Event:Use_TimeoutStarverX, readyAddress,
+                  getCacheEntry(readyAddress), tbe);
         } else {
-          trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress(),
-                  getCacheEntry(useTimerTable.readyAddress()), tbe);
+          trigger(Event:Use_TimeoutStarverS, readyAddress,
+                  getCacheEntry(readyAddress), tbe);
         }
       } else {
         if (no_mig_atomic && IsAtomic(tbe)) {
-          trigger(Event:Use_TimeoutNoStarvers_NoMig, useTimerTable.readyAddress(),
-                  getCacheEntry(useTimerTable.readyAddress()), tbe);
+          trigger(Event:Use_TimeoutNoStarvers_NoMig, readyAddress,
+                  getCacheEntry(readyAddress), tbe);
         } else {
-          trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress(),
-                  getCacheEntry(useTimerTable.readyAddress()), tbe);
+          trigger(Event:Use_TimeoutNoStarvers, readyAddress,
+                  getCacheEntry(readyAddress), tbe);
         }
       }
     }
@@ -482,16 +485,17 @@ machine(L1Cache, "Token protocol")
 
   // Reissue Timer
   in_port(reissueTimerTable_in, Addr, reissueTimerTable, rank=4) {
-    if (reissueTimerTable_in.isReady()) {
-      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
-              getCacheEntry(reissueTimerTable.readyAddress()),
-              L1_TBEs[reissueTimerTable.readyAddress()]);
+    Tick current_time := clockEdge();
+    if (reissueTimerTable_in.isReady(current_time)) {
+      Addr addr := reissueTimerTable.nextAddress();
+      trigger(Event:Request_Timeout, addr, getCacheEntry(addr),
+              L1_TBEs.lookup(addr));
     }
   }
 
   // Persistent Network
   in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
-    if (persistentNetwork_in.isReady()) {
+    if (persistentNetwork_in.isReady(clockEdge())) {
       peek(persistentNetwork_in, PersistentMsg, block_on="addr") {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -541,7 +545,7 @@ machine(L1Cache, "Token protocol")
 
   // Response Network
   in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
-    if (responseNetwork_in.isReady()) {
+    if (responseNetwork_in.isReady(clockEdge())) {
       peek(responseNetwork_in, ResponseMsg, block_on="addr") {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -612,7 +616,7 @@ machine(L1Cache, "Token protocol")
 
   // Request Network
   in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
-    if (requestNetwork_in.isReady()) {
+    if (requestNetwork_in.isReady(clockEdge())) {
       peek(requestNetwork_in, RequestMsg, block_on="addr") {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -659,7 +663,7 @@ machine(L1Cache, "Token protocol")
 
   // Mandatory Queue
   in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
-    if (mandatoryQueue_in.isReady()) {
+    if (mandatoryQueue_in.isReady(clockEdge())) {
       peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
 
@@ -792,7 +796,8 @@ machine(L1Cache, "Token protocol")
           // IssueCount.
 
           // Set a wakeup timer
-          reissueTimerTable.set(address, reissue_wakeup_latency);
+          reissueTimerTable.set(
+            address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
 
         }
       } else {
@@ -844,9 +849,11 @@ machine(L1Cache, "Token protocol")
         // Set a wakeup timer
 
         if (dynamic_timeout_enabled) {
-          reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
+          reissueTimerTable.set(
+            address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
         } else {
-          reissueTimerTable.set(address, fixed_timeout_latency);
+          reissueTimerTable.set(
+            address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
         }
 
       }
@@ -911,7 +918,8 @@ machine(L1Cache, "Token protocol")
           // IssueCount.
 
           // Set a wakeup timer
-          reissueTimerTable.set(address, reissue_wakeup_latency);
+          reissueTimerTable.set(
+            address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
         }
 
       } else  {
@@ -968,9 +976,11 @@ machine(L1Cache, "Token protocol")
 
         // Set a wakeup timer
         if (dynamic_timeout_enabled) {
-          reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
+          reissueTimerTable.set(
+            address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
         } else {
-          reissueTimerTable.set(address, fixed_timeout_latency);
+          reissueTimerTable.set(
+            address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
         }
       }
   }
@@ -1376,23 +1386,24 @@ machine(L1Cache, "Token protocol")
   }
 
   action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
-    mandatoryQueue_in.dequeue();
+    mandatoryQueue_in.dequeue(clockEdge());
   }
 
   action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
-    persistentNetwork_in.dequeue();
+    persistentNetwork_in.dequeue(clockEdge());
   }
 
   action(m_popRequestQueue, "m", desc="Pop request queue.") {
-    requestNetwork_in.dequeue();
+    requestNetwork_in.dequeue(clockEdge());
   }
 
   action(n_popResponseQueue, "n", desc="Pop response queue") {
-    responseNetwork_in.dequeue();
+    responseNetwork_in.dequeue(clockEdge());
   }
 
   action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
-    useTimerTable.set(address, use_timeout_latency);
+    useTimerTable.set(
+        address, clockEdge() + cyclesToTicks(use_timeout_latency));
   }
 
   action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
index 52bd19bccfda9ec5a551c4f59cedc25d1d4a7f8c..2ab593394df6e5bd76c83d44dcb371ed8bf33202 100644 (file)
@@ -149,6 +149,7 @@ machine(L2Cache, "Token protocol")
   PersistentTable persistentTable;
   PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
 
+  Tick clockEdge();
   void set_cache_entry(AbstractCacheEntry b);
   void unset_cache_entry();
 
@@ -326,7 +327,7 @@ machine(L2Cache, "Token protocol")
 
   // Persistent Network
   in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
-    if (persistentNetwork_in.isReady()) {
+    if (persistentNetwork_in.isReady(clockEdge())) {
       peek(persistentNetwork_in, PersistentMsg) {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -366,7 +367,7 @@ machine(L2Cache, "Token protocol")
 
   // Request Network
   in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
-    if (requestNetwork_in.isReady()) {
+    if (requestNetwork_in.isReady(clockEdge())) {
       peek(requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -389,7 +390,7 @@ machine(L2Cache, "Token protocol")
   }
 
   in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
-    if (L1requestNetwork_in.isReady()) {
+    if (L1requestNetwork_in.isReady(clockEdge())) {
       peek(L1requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
         Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -413,7 +414,7 @@ machine(L2Cache, "Token protocol")
 
   // Response Network
   in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
-    if (responseNetwork_in.isReady()) {
+    if (responseNetwork_in.isReady(clockEdge())) {
       peek(responseNetwork_in, ResponseMsg) {
         assert(in_msg.Destination.isElement(machineID));
         Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -870,19 +871,19 @@ machine(L2Cache, "Token protocol")
   }
 
   action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
-    persistentNetwork_in.dequeue();
+    persistentNetwork_in.dequeue(clockEdge());
   }
 
   action(m_popRequestQueue, "m", desc="Pop request queue.") {
-    requestNetwork_in.dequeue();
+    requestNetwork_in.dequeue(clockEdge());
   }
 
   action(n_popResponseQueue, "n", desc="Pop response queue") {
-    responseNetwork_in.dequeue();
+    responseNetwork_in.dequeue(clockEdge());
   }
 
   action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
-    L1requestNetwork_in.dequeue();
+    L1requestNetwork_in.dequeue(clockEdge());
   }
 
 
index ffef01eb07056d6b37fbb22eebb89a2bbb4ae9f1..63790531fdee00e4eaad69186376e334ce569ef9 100644 (file)
@@ -172,6 +172,8 @@ machine(Directory, "Token protocol")
   bool starving, default="false";
   int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
 
+  Tick clockEdge();
+  Tick cyclesToTicks(Cycles c);
   void set_tbe(TBE b);
   void unset_tbe();
 
@@ -276,7 +278,7 @@ machine(Directory, "Token protocol")
   // ** IN_PORTS **
   // off-chip memory request/response is done
   in_port(memQueue_in, MemoryMsg, responseFromMemory) {
-    if (memQueue_in.isReady()) {
+    if (memQueue_in.isReady(clockEdge())) {
       peek(memQueue_in, MemoryMsg) {
         if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
           trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -292,14 +294,15 @@ machine(Directory, "Token protocol")
 
   // Reissue Timer
   in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
-    if (reissueTimerTable_in.isReady()) {
-      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
-              TBEs[reissueTimerTable.readyAddress()]);
+    Tick current_time := clockEdge();
+    if (reissueTimerTable_in.isReady(current_time)) {
+      Addr addr := reissueTimerTable.nextAddress();
+      trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
     }
   }
 
   in_port(responseNetwork_in, ResponseMsg, responseToDir) {
-    if (responseNetwork_in.isReady()) {
+    if (responseNetwork_in.isReady(clockEdge())) {
       peek(responseNetwork_in, ResponseMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
@@ -338,7 +341,7 @@ machine(Directory, "Token protocol")
   }
 
   in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
-    if (persistentNetwork_in.isReady()) {
+    if (persistentNetwork_in.isReady(clockEdge())) {
       peek(persistentNetwork_in, PersistentMsg) {
         assert(in_msg.Destination.isElement(machineID));
 
@@ -400,7 +403,7 @@ machine(Directory, "Token protocol")
   }
 
   in_port(requestNetwork_in, RequestMsg, requestToDir) {
-    if (requestNetwork_in.isReady()) {
+    if (requestNetwork_in.isReady(clockEdge())) {
       peek(requestNetwork_in, RequestMsg) {
         assert(in_msg.Destination.isElement(machineID));
         if (in_msg.Type == CoherenceRequestType:GETS) {
@@ -415,7 +418,7 @@ machine(Directory, "Token protocol")
   }
 
   in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
-    if (dmaRequestQueue_in.isReady()) {
+    if (dmaRequestQueue_in.isReady(clockEdge())) {
       peek(dmaRequestQueue_in, DMARequestMsg) {
         if (in_msg.Type == DMARequestType:READ) {
           trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
@@ -490,7 +493,7 @@ machine(Directory, "Token protocol")
       // IssueCount.
 
       // Set a wakeup timer
-      reissueTimerTable.set(address, reissue_wakeup_latency);
+      reissueTimerTable.set(address, cyclesToTicks(reissue_wakeup_latency));
     }
   }
 
@@ -558,7 +561,7 @@ machine(Directory, "Token protocol")
       // IssueCount.
 
       // Set a wakeup timer
-      reissueTimerTable.set(address, reissue_wakeup_latency);
+      reissueTimerTable.set(address, cyclesToTicks(reissue_wakeup_latency));
     }
   }
 
@@ -752,35 +755,35 @@ machine(Directory, "Token protocol")
   }
 
   action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
-    requestNetwork_in.dequeue();
+    requestNetwork_in.dequeue(clockEdge());
   }
 
   action(z_recycleRequest, "z", desc="Recycle the request queue") {
-    requestNetwork_in.recycle();
+    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
-    responseNetwork_in.dequeue();
+    responseNetwork_in.dequeue(clockEdge());
   }
 
   action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
-    responseNetwork_in.recycle();
+    responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
-    persistentNetwork_in.dequeue();
+    persistentNetwork_in.dequeue(clockEdge());
   }
 
   action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
-    dmaRequestQueue_in.dequeue();
+    dmaRequestQueue_in.dequeue(clockEdge());
   }
 
   action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
-    dmaRequestQueue_in.recycle();
+    dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
   }
 
   action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
-    memQueue_in.dequeue();
+    memQueue_in.dequeue(clockEdge());
   }
 
   action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
@@ -804,7 +807,7 @@ machine(Directory, "Token protocol")
     //
     if (reissueTimerTable.isSet(address)) {
       reissueTimerTable.unset(address);
-      reissueTimerTable.set(address, fixed_timeout_latency);
+      reissueTimerTable.set(address, cyclesToTicks(fixed_timeout_latency));
     }
   }
 
@@ -812,7 +815,7 @@ machine(Directory, "Token protocol")
     //
     // currently only support a fixed timeout latency
     //
-    reissueTimerTable.set(address, fixed_timeout_latency);
+    reissueTimerTable.set(address, cyclesToTicks(fixed_timeout_latency));
   }
 
   action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
index 4bb80d4bafc145602d36733916fbd7f74e9b874f..efe3db3cd800086fc9422177b6ca9b626a4e3c9f 100644 (file)
@@ -54,6 +54,8 @@ machine(DMA, "DMA Controller")
 
   State cur_state;
 
+  Tick clockEdge();
+
   State getState(Addr addr) {
     return cur_state;
   }
@@ -80,7 +82,7 @@ machine(DMA, "DMA Controller")
   out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
 
   in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
-    if (dmaRequestQueue_in.isReady()) {
+    if (dmaRequestQueue_in.isReady(clockEdge())) {
       peek(dmaRequestQueue_in, SequencerMsg) {
         if (in_msg.Type == SequencerRequestType:LD ) {
           trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -94,7 +96,7 @@ machine(DMA, "DMA Controller")
   }
 
   in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
-    if (dmaResponseQueue_in.isReady()) {
+    if (dmaResponseQueue_in.isReady(clockEdge())) {
       peek( dmaResponseQueue_in, DMAResponseMsg) {
         if (in_msg.Type == DMAResponseType:ACK) {
           trigger(Event:Ack, in_msg.LineAddress);
@@ -150,11 +152,11 @@ machine(DMA, "DMA Controller")
   }
 
   action(p_popRequestQueue, "p", desc="Pop request queue") {
-    dmaRequestQueue_in.dequeue();
+    dmaRequestQueue_in.dequeue(clockEdge());
   }
 
   action(p_popResponseQueue, "\p", desc="Pop request queue") {
-    dmaResponseQueue_in.dequeue();
+    dmaResponseQueue_in.dequeue(clockEdge());
   }
 
   transition(READY, ReadRequest, BUSY_RD) {
index 88b7308ed964ba5a616da409883c38021ec04612..5d23835410b3067de34544f778e33360fc56a701 100644 (file)
@@ -181,6 +181,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
 
   TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
 
+  Tick clockEdge();
   void set_cache_entry(AbstractCacheEntry b);
   void unset_cache_entry();
   void set_tbe(TBE b);
@@ -329,7 +330,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
 
   // Trigger Queue
   in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
-    if (triggerQueue_in.isReady()) {
+    if (triggerQueue_in.isReady(clockEdge())) {
       peek(triggerQueue_in, TriggerMsg) {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -352,7 +353,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
 
   // Response Network
   in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
-    if (responseToCache_in.isReady()) {
+    if (responseToCache_in.isReady(clockEdge())) {
       peek(responseToCache_in, ResponseMsg, block_on="addr") {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -377,7 +378,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
 
   // Forward Network
   in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
-    if (forwardToCache_in.isReady()) {
+    if (forwardToCache_in.isReady(clockEdge())) {
       peek(forwardToCache_in, RequestMsg, block_on="addr") {
 
         Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -421,7 +422,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
 
   // Mandatory Queue
   in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
-    if (mandatoryQueue_in.isReady()) {
+    if (mandatoryQueue_in.isReady(clockEdge())) {
       peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -950,15 +951,15 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
-    triggerQueue_in.dequeue();
+    triggerQueue_in.dequeue(clockEdge());
   }
 
   action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
-    mandatoryQueue_in.dequeue();
+    mandatoryQueue_in.dequeue(clockEdge());
   }
 
   action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
-    forwardToCache_in.dequeue();
+    forwardToCache_in.dequeue(clockEdge());
   }
 
   action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
@@ -1017,7 +1018,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
   }
 
   action(n_popResponseQueue, "n", desc="Pop response queue") {
-    responseToCache_in.dequeue();
+    responseToCache_in.dequeue(clockEdge());
   }
 
   action(ll_L2toL1Transfer, "ll", desc="") {
index 4948a8108822d3504d11694d126df40f2eede5ce..4f5b00658e32397cc181e865cf9f9ac8d74868f9 100644 (file)
@@ -184,6 +184,7 @@ machine(Directory, "AMD Hammer-like protocol")
     bool isPresent(Addr);
   }
 
+  Tick clockEdge();
   void set_cache_entry(AbstractCacheEntry b);
   void unset_cache_entry();
   void set_tbe(TBE a);
@@ -314,7 +315,7 @@ machine(Directory, "AMD Hammer-like protocol")
   
   // Trigger Queue
   in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
-    if (triggerQueue_in.isReady()) {
+    if (triggerQueue_in.isReady(clockEdge())) {
       peek(triggerQueue_in, TriggerMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
         TBE tbe := TBEs[in_msg.addr];
@@ -338,7 +339,7 @@ machine(Directory, "AMD Hammer-like protocol")
   }
 
   in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
-    if (unblockNetwork_in.isReady()) {
+    if (unblockNetwork_in.isReady(clockEdge())) {
       peek(unblockNetwork_in, ResponseMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
         TBE tbe := TBEs[in_msg.addr];
@@ -367,7 +368,7 @@ machine(Directory, "AMD Hammer-like protocol")
 
   // Response Network
   in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
-    if (responseToDir_in.isReady()) {
+    if (responseToDir_in.isReady(clockEdge())) {
       peek(responseToDir_in, ResponseMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
         TBE tbe := TBEs[in_msg.addr];
@@ -390,7 +391,7 @@ machine(Directory, "AMD Hammer-like protocol")
 
   // off-chip memory request/response is done
   in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
-    if (memQueue_in.isReady()) {
+    if (memQueue_in.isReady(clockEdge())) {
       peek(memQueue_in, MemoryMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
         TBE tbe := TBEs[in_msg.addr];
@@ -407,7 +408,7 @@ machine(Directory, "AMD Hammer-like protocol")
   }
 
   in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
-    if (requestQueue_in.isReady()) {
+    if (requestQueue_in.isReady(clockEdge())) {
       peek(requestQueue_in, RequestMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
         TBE tbe := TBEs[in_msg.addr];
@@ -441,7 +442,7 @@ machine(Directory, "AMD Hammer-like protocol")
   }
 
   in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
-    if (dmaRequestQueue_in.isReady()) {
+    if (dmaRequestQueue_in.isReady(clockEdge())) {
       peek(dmaRequestQueue_in, DMARequestMsg) {
         PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
         TBE tbe := TBEs[in_msg.LineAddress];
@@ -682,7 +683,7 @@ machine(Directory, "AMD Hammer-like protocol")
   }
 
   action(n_popResponseQueue, "n", desc="Pop response queue") {
-    responseToDir_in.dequeue();
+    responseToDir_in.dequeue(clockEdge());
   }
 
   action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
@@ -1115,14 +1116,14 @@ machine(Directory, "AMD Hammer-like protocol")
   }
 
   action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
-    requestQueue_in.dequeue();
+    requestQueue_in.dequeue(clockEdge());
   }
 
   action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
     peek(unblockNetwork_in, ResponseMsg) {
         APPEND_TRANSITION_COMMENT(in_msg.Sender);
     } 
-    unblockNetwork_in.dequeue();
+    unblockNetwork_in.dequeue(clockEdge());
   }
 
   action(k_wakeUpDependents, "k", desc="wake-up dependents") {
@@ -1130,15 +1131,15 @@ machine(Directory, "AMD Hammer-like protocol")
   }
 
   action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
-    memQueue_in.dequeue();
+    memQueue_in.dequeue(clockEdge());
   }
 
   action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
-    triggerQueue_in.dequeue();
+    triggerQueue_in.dequeue(clockEdge());
   }
 
   action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
-    dmaRequestQueue_in.dequeue();
+    dmaRequestQueue_in.dequeue(clockEdge());
   }
 
   action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
index 4691e2490b647f6f2e9d1c1000fb19f634e0bdd0..7157082c44a711488529fcdb2662e359f801b257 100644 (file)
@@ -52,6 +52,8 @@ machine(DMA, "DMA Controller")
 
   State cur_state;
 
+  Tick clockEdge();
+
   State getState(Addr addr) {
     return cur_state;
   }
@@ -77,7 +79,7 @@ machine(DMA, "DMA Controller")
   out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
 
   in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
-    if (dmaRequestQueue_in.isReady()) {
+    if (dmaRequestQueue_in.isReady(clockEdge())) {
       peek(dmaRequestQueue_in, SequencerMsg) {
         if (in_msg.Type == SequencerRequestType:LD ) {
           trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -91,7 +93,7 @@ machine(DMA, "DMA Controller")
   }
 
   in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
-    if (dmaResponseQueue_in.isReady()) {
+    if (dmaResponseQueue_in.isReady(clockEdge())) {
       peek( dmaResponseQueue_in, DMAResponseMsg) {
         if (in_msg.Type == DMAResponseType:ACK) {
           trigger(Event:Ack, in_msg.LineAddress);
@@ -147,11 +149,11 @@ machine(DMA, "DMA Controller")
   }
 
   action(p_popRequestQueue, "p", desc="Pop request queue") {
-    dmaRequestQueue_in.dequeue();
+    dmaRequestQueue_in.dequeue(clockEdge());
   }
 
   action(p_popResponseQueue, "\p", desc="Pop response queue") {
-    dmaResponseQueue_in.dequeue();
+    dmaResponseQueue_in.dequeue(clockEdge());
   }
 
   transition(READY, ReadRequest, BUSY_RD) {
index 82829a6eaaf5900c2d3312ba2c2088e400a41b9f..dab8f1089ff84ff7b9c31bdc67f68dfe7fa549cc 100644 (file)
@@ -68,6 +68,7 @@ machine(L1Cache, "Network_test L1 Cache")
   }
 
   // FUNCTIONS
+  Tick clockEdge();
 
   // cpu/testers/networktest/networktest.cc generates packets of the type
   // ReadReq, INST_FETCH, and WriteReq.
@@ -129,7 +130,7 @@ machine(L1Cache, "Network_test L1 Cache")
 
   // Mandatory Queue
   in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
-    if (mandatoryQueue_in.isReady()) {
+    if (mandatoryQueue_in.isReady(clockEdge())) {
       peek(mandatoryQueue_in, RubyRequest) {
         trigger(mandatory_request_type_to_event(in_msg.Type),
                 in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
@@ -174,7 +175,7 @@ machine(L1Cache, "Network_test L1 Cache")
   }
 
   action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
-    mandatoryQueue_in.dequeue();
+    mandatoryQueue_in.dequeue(clockEdge());
   }
 
   action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
index d618e98ff341de36289ca8d1a2278f7bb3312110..6bd6920b34e444bc7698234941a3233afa021532 100644 (file)
@@ -60,7 +60,9 @@ machine(Directory, "Network_test Directory")
     DataBlock DataBlk,             desc="data for the block";
   }
 
-  // ** OBJECTS **
+  // ** FUNCTIONS **
+  Tick clockEdge();
+
   State getState(Addr addr) {
     return State:I;
   }
@@ -87,7 +89,7 @@ machine(Directory, "Network_test Directory")
   // ** IN_PORTS **
 
   in_port(requestQueue_in, RequestMsg, requestToDir) {
-    if (requestQueue_in.isReady()) {
+    if (requestQueue_in.isReady(clockEdge())) {
       peek(requestQueue_in, RequestMsg) {
         if (in_msg.Type == CoherenceRequestType:MSG) {
           trigger(Event:Receive_Request, in_msg.addr);
@@ -98,7 +100,7 @@ machine(Directory, "Network_test Directory")
     }
   }
   in_port(forwardQueue_in, RequestMsg, forwardToDir) {
-    if (forwardQueue_in.isReady()) {
+    if (forwardQueue_in.isReady(clockEdge())) {
       peek(forwardQueue_in, RequestMsg) {
         if (in_msg.Type == CoherenceRequestType:MSG) {
           trigger(Event:Receive_Forward, in_msg.addr);
@@ -109,7 +111,7 @@ machine(Directory, "Network_test Directory")
     }
   }
   in_port(responseQueue_in, RequestMsg, responseToDir) {
-    if (responseQueue_in.isReady()) {
+    if (responseQueue_in.isReady(clockEdge())) {
       peek(responseQueue_in, RequestMsg) {
         if (in_msg.Type == CoherenceRequestType:MSG) {
           trigger(Event:Receive_Response, in_msg.addr);
@@ -123,15 +125,15 @@ machine(Directory, "Network_test Directory")
   // Actions
 
   action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
-    requestQueue_in.dequeue();
+    requestQueue_in.dequeue(clockEdge());
   }
 
   action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
-    forwardQueue_in.dequeue();
+    forwardQueue_in.dequeue(clockEdge());
   }
 
   action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
-    responseQueue_in.dequeue();
+    responseQueue_in.dequeue(clockEdge());
   }
 
   // TRANSITIONS
index d4f7fa58f26f62f2f5f61b8372e473e686cf7285..eb235f8f39e25772014e37b8d19a463c9fd8aac4 100644 (file)
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
  * All rights reserved.
@@ -31,6 +30,7 @@
 NodeID version;
 MachineID machineID;
 NodeID clusterID;
+Cycles recycle_latency;
 
 // Functions implemented in the AbstractController class for
 // making timing access to the memory maintained by the
index eeec185fc84da26607aa270bf0a1878a2c94f544..7c20692032812ae5432dee8036a562d13187e799 100644 (file)
@@ -37,6 +37,7 @@ external_type(PacketPtr, primitive="yes");
 external_type(Packet, primitive="yes");
 external_type(Addr, primitive="yes");
 external_type(Cycles, primitive="yes", default="Cycles(0)");
+external_type(Tick, primitive="yes", default="0");
 
 structure(DataBlock, external = "yes", desc="..."){
   void clear();
index f464b3c7d747a21f8816ed17f2ea0becd265104f..a8bf93bccc33e687d2f2211acb05e81a52fd3b16 100644 (file)
@@ -41,9 +41,9 @@ external_type(OutPort, primitive="yes");
 external_type(Scalar, primitive="yes");
 
 structure(InPort, external = "yes", primitive="yes") {
-  bool isReady();
-  Cycles dequeue();
-  void recycle();
+  bool isReady(Tick current_time);
+  Tick dequeue(Tick current_time);
+  void recycle(Tick current_time, Tick recycle_latency);
   bool isEmpty();
   bool isStallMapEmpty();
   int getStallMapSize();
@@ -179,9 +179,9 @@ structure (DMASequencer, external = "yes") {
 }
 
 structure (TimerTable, inport="yes", external = "yes") {
-  bool isReady();
-  Addr readyAddress();
-  void set(Addr, Cycles);
+  bool isReady(Tick);
+  Addr nextAddress();
+  void set(Addr, Tick);
   void unset(Addr);
   bool isSet(Addr);
 }
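
These declarations are what every in_port and timer reference in the protocol files above resolves to: the current time is now an explicit Tick argument instead of something the buffer reads from its receiver pointer. A rough C++-level sketch of the pattern the generated controllers end up following; the names m_requestQueue_ptr, m_recycle_latency and canProcess() are placeholders, not taken from any specific protocol:

    // Sketch only: a generated controller is a ClockedObject, so it supplies
    // the current tick itself and converts Cycles latencies via cyclesToTicks.
    if (m_requestQueue_ptr->isReady(clockEdge())) {
        const Message *in_msg = m_requestQueue_ptr->peek();
        if (canProcess(in_msg)) {               // placeholder predicate
            // ... run the transition triggered by in_msg ...
            m_requestQueue_ptr->dequeue(clockEdge());
        } else {
            // retry later: the latency is converted by the caller, not the buffer
            m_requestQueue_ptr->recycle(clockEdge(),
                                        cyclesToTicks(m_recycle_latency));
        }
    }
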
index b07bdbdca729cfb65c229de445e933ebc97dbca6..35850f61eebf34c2de187e0ffe2495a951b87e27 100644 (file)
@@ -40,7 +40,7 @@ using namespace std;
 using m5::stl_helpers::operator<<;
 
 MessageBuffer::MessageBuffer(const Params *p)
-    : SimObject(p), m_recycle_latency(p->recycle_latency),
+    : SimObject(p),
     m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
     m_time_last_time_enqueue(0), m_time_last_time_pop(0),
     m_last_arrival_time(0), m_strict_fifo(p->ordered),
@@ -48,9 +48,6 @@ MessageBuffer::MessageBuffer(const Params *p)
 {
     m_msg_counter = 0;
     m_consumer = NULL;
-    m_sender = NULL;
-    m_receiver = NULL;
-
     m_size_last_time_size_checked = 0;
     m_size_at_cycle_start = 0;
     m_msgs_this_cycle = 0;
@@ -63,10 +60,10 @@ MessageBuffer::MessageBuffer(const Params *p)
 }
 
 unsigned int
-MessageBuffer::getSize()
+MessageBuffer::getSize(Tick curTime)
 {
-    if (m_time_last_time_size_checked != m_receiver->curCycle()) {
-        m_time_last_time_size_checked = m_receiver->curCycle();
+    if (m_time_last_time_size_checked != curTime) {
+        m_time_last_time_size_checked = curTime;
         m_size_last_time_size_checked = m_prio_heap.size();
     }
 
@@ -74,7 +71,7 @@ MessageBuffer::getSize()
 }
 
 bool
-MessageBuffer::areNSlotsAvailable(unsigned int n)
+MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
 {
 
     // fast path when message buffers have infinite size
@@ -88,11 +85,11 @@ MessageBuffer::areNSlotsAvailable(unsigned int n)
     // size immediately
     unsigned int current_size = 0;
 
-    if (m_time_last_time_pop < m_sender->clockEdge()) {
+    if (m_time_last_time_pop < current_time) {
         // no pops this cycle - heap size is correct
         current_size = m_prio_heap.size();
     } else {
-        if (m_time_last_time_enqueue < m_sender->curCycle()) {
+        if (m_time_last_time_enqueue < current_time) {
             // no enqueues this cycle - m_size_at_cycle_start is correct
             current_size = m_size_at_cycle_start;
         } else {
@@ -118,8 +115,6 @@ const Message*
 MessageBuffer::peek() const
 {
     DPRINTF(RubyQueue, "Peeking at head of queue.\n");
-    assert(isReady());
-
     const Message* msg_ptr = m_prio_heap.front().get();
     assert(msg_ptr);
 
@@ -128,24 +123,24 @@ MessageBuffer::peek() const
 }
 
 // FIXME - move me somewhere else
-Cycles
+Tick
 random_time()
 {
-    Cycles time(1);
-    time += Cycles(random_mt.random(0, 3));  // [0...3]
+    Tick time = 1;
+    time += random_mt.random(0, 3);  // [0...3]
     if (random_mt.random(0, 7) == 0) {  // 1 in 8 chance
-        time += Cycles(100 + random_mt.random(1, 15)); // 100 + [1...15]
+        time += 100 + random_mt.random(1, 15); // 100 + [1...15]
     }
     return time;
 }
 
 void
-MessageBuffer::enqueue(MsgPtr message, Cycles delta)
+MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
 {
     // record current time incase we have a pop that also adjusts my size
-    if (m_time_last_time_enqueue < m_sender->curCycle()) {
+    if (m_time_last_time_enqueue < current_time) {
         m_msgs_this_cycle = 0;  // first msg this cycle
-        m_time_last_time_enqueue = m_sender->curCycle();
+        m_time_last_time_enqueue = current_time;
     }
 
     m_msg_counter++;
@@ -154,23 +149,20 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
     // Calculate the arrival time of the message, that is, the first
     // cycle the message can be dequeued.
     assert(delta > 0);
-    Tick current_time = m_sender->clockEdge();
     Tick arrival_time = 0;
 
     if (!RubySystem::getRandomization() || !m_randomization) {
         // No randomization
-        arrival_time = current_time + delta * m_sender->clockPeriod();
+        arrival_time = current_time + delta;
     } else {
         // Randomization - ignore delta
         if (m_strict_fifo) {
             if (m_last_arrival_time < current_time) {
                 m_last_arrival_time = current_time;
             }
-            arrival_time = m_last_arrival_time +
-                           random_time() * m_sender->clockPeriod();
+            arrival_time = m_last_arrival_time + random_time();
         } else {
-            arrival_time = current_time +
-                           random_time() * m_sender->clockPeriod();
+            arrival_time = current_time + random_time();
         }
     }
 
@@ -180,9 +172,8 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
         if (arrival_time < m_last_arrival_time) {
             panic("FIFO ordering violated: %s name: %s current time: %d "
                   "delta: %d arrival_time: %d last arrival_time: %d\n",
-                  *this, name(), current_time,
-                  delta * m_sender->clockPeriod(),
-                  arrival_time, m_last_arrival_time);
+                  *this, name(), current_time, delta, arrival_time,
+                  m_last_arrival_time);
         }
     }
 
@@ -195,10 +186,10 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
     Message* msg_ptr = message.get();
     assert(msg_ptr != NULL);
 
-    assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
+    assert(current_time >= msg_ptr->getLastEnqueueTime() &&
            "ensure we aren't dequeued early");
 
-    msg_ptr->updateDelayedTicks(m_sender->clockEdge());
+    msg_ptr->updateDelayedTicks(current_time);
     msg_ptr->setLastEnqueueTime(arrival_time);
     msg_ptr->setMsgCounter(m_msg_counter);
 
@@ -215,32 +206,30 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
     m_consumer->storeEventInfo(m_vnet_id);
 }
 
-Cycles
-MessageBuffer::dequeue()
+Tick
+MessageBuffer::dequeue(Tick current_time)
 {
     DPRINTF(RubyQueue, "Popping\n");
-    assert(isReady());
+    assert(isReady(current_time));
 
     // get MsgPtr of the message about to be dequeued
     MsgPtr message = m_prio_heap.front();
 
     // get the delay cycles
-    message->updateDelayedTicks(m_receiver->clockEdge());
-    Cycles delayCycles =
-        m_receiver->ticksToCycles(message->getDelayedTicks());
+    message->updateDelayedTicks(current_time);
+    Tick delay = message->getDelayedTicks();
 
     // record previous size and time so the current buffer size isn't
     // adjusted until schd cycle
-    if (m_time_last_time_pop < m_receiver->clockEdge()) {
+    if (m_time_last_time_pop < current_time) {
         m_size_at_cycle_start = m_prio_heap.size();
-        m_time_last_time_pop = m_receiver->clockEdge();
+        m_time_last_time_pop = current_time;
     }
 
-    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
-        greater<MsgPtr>());
+    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
     m_prio_heap.pop_back();
 
-    return delayCycles;
+    return delay;
 }
 
 void
@@ -249,25 +238,26 @@ MessageBuffer::clear()
     m_prio_heap.clear();
 
     m_msg_counter = 0;
-    m_time_last_time_enqueue = Cycles(0);
+    m_time_last_time_enqueue = 0;
     m_time_last_time_pop = 0;
     m_size_at_cycle_start = 0;
     m_msgs_this_cycle = 0;
 }
 
 void
-MessageBuffer::recycle()
+MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
 {
     DPRINTF(RubyQueue, "Recycling.\n");
-    assert(isReady());
+    assert(isReady(current_time));
     MsgPtr node = m_prio_heap.front();
     pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
 
-    node->setLastEnqueueTime(m_receiver->clockEdge(m_recycle_latency));
+    Tick future_time = current_time + recycle_latency;
+    node->setLastEnqueueTime(future_time);
+
     m_prio_heap.back() = node;
     push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
-    m_consumer->
-        scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency));
+    m_consumer->scheduleEventAbsolute(future_time);
 }
 
 void
@@ -289,11 +279,10 @@ MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
 }
 
 void
-MessageBuffer::reanalyzeMessages(Addr addr)
+MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
 {
     DPRINTF(RubyQueue, "ReanalyzeMessages %s\n", addr);
     assert(m_stall_msg_map.count(addr) > 0);
-    Tick curTick = m_receiver->clockEdge();
 
     //
     // Put all stalled messages associated with this address back on the
@@ -301,15 +290,14 @@ MessageBuffer::reanalyzeMessages(Addr addr)
     // scheduled for the current cycle so that the previously stalled messages
     // will be observed before any younger messages that may arrive this cycle
     //
-    reanalyzeList(m_stall_msg_map[addr], curTick);
+    reanalyzeList(m_stall_msg_map[addr], current_time);
     m_stall_msg_map.erase(addr);
 }
 
 void
-MessageBuffer::reanalyzeAllMessages()
+MessageBuffer::reanalyzeAllMessages(Tick current_time)
 {
     DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
-    Tick curTick = m_receiver->clockEdge();
 
     //
     // Put all stalled messages associated with this address back on the
@@ -319,20 +307,20 @@ MessageBuffer::reanalyzeAllMessages()
     //
     for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
          map_iter != m_stall_msg_map.end(); ++map_iter) {
-        reanalyzeList(map_iter->second, curTick);
+        reanalyzeList(map_iter->second, current_time);
     }
     m_stall_msg_map.clear();
 }
 
 void
-MessageBuffer::stallMessage(Addr addr)
+MessageBuffer::stallMessage(Addr addr, Tick current_time)
 {
     DPRINTF(RubyQueue, "Stalling due to %s\n", addr);
-    assert(isReady());
+    assert(isReady(current_time));
     assert(getOffset(addr) == 0);
     MsgPtr message = m_prio_heap.front();
 
-    dequeue();
+    dequeue(current_time);
 
     //
     // Note: no event is scheduled to analyze the map at a later time.
@@ -356,10 +344,10 @@ MessageBuffer::print(ostream& out) const
 }
 
 bool
-MessageBuffer::isReady() const
+MessageBuffer::isReady(Tick current_time) const
 {
     return ((m_prio_heap.size() > 0) &&
-        (m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
+        (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
 }
 
 bool
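
The net effect of these changes is that isReady() becomes a plain comparison against the caller's tick and enqueue() adds a Tick delta without any clockPeriod scaling inside the buffer. The toy model below reproduces that arithmetic; it is a standalone illustration, not gem5 code, and the 500-tick clock period and one-cycle latency are made-up values:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <queue>
    #include <vector>

    using Tick = uint64_t;

    // Toy stand-in for the Tick-based MessageBuffer: each message carries an
    // absolute arrival tick; readiness is a comparison against the caller's time.
    struct ToyBuffer {
        std::priority_queue<Tick, std::vector<Tick>, std::greater<Tick>> heap;

        void enqueue(Tick current_time, Tick delta) {    // delta already in Ticks
            heap.push(current_time + delta);             // no clockPeriod scaling
        }
        bool isReady(Tick current_time) const {
            return !heap.empty() && heap.top() <= current_time;
        }
        void dequeue(Tick current_time) {
            // the real class asserts isReady(current_time) here
            if (isReady(current_time))
                heap.pop();
        }
    };

    int main()
    {
        const Tick clock_period = 500;            // hypothetical clock
        Tick now = 10 * clock_period;             // i.e. what clockEdge() returns

        ToyBuffer buf;
        buf.enqueue(now, 1 * clock_period);       // cyclesToTicks(Cycles(1))

        std::cout << std::boolalpha
                  << buf.isReady(now) << '\n'                  // false
                  << buf.isReady(now + clock_period) << '\n';  // true
        buf.dequeue(now + clock_period);
        return 0;
    }
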
index 4209aea0f1cb577a686467dd7920a6a8b689fbb6..4fdf4978d5ece8789fc80c0e37e3a239cbcbd7ce 100644 (file)
@@ -55,24 +55,24 @@ class MessageBuffer : public SimObject
     typedef MessageBufferParams Params;
     MessageBuffer(const Params *p);
 
-    void reanalyzeMessages(Addr addr);
-    void reanalyzeAllMessages();
-    void stallMessage(Addr addr);
+    void reanalyzeMessages(Addr addr, Tick current_time);
+    void reanalyzeAllMessages(Tick current_time);
+    void stallMessage(Addr addr, Tick current_time);
 
     // TRUE if head of queue timestamp <= SystemTime
-    bool isReady() const;
+    bool isReady(Tick current_time) const;
 
     void
-    delayHead()
+    delayHead(Tick current_time, Tick delta)
     {
         MsgPtr m = m_prio_heap.front();
         std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
                       std::greater<MsgPtr>());
         m_prio_heap.pop_back();
-        enqueue(m, Cycles(1));
+        enqueue(m, current_time, delta);
     }
 
-    bool areNSlotsAvailable(unsigned int n);
+    bool areNSlotsAvailable(unsigned int n, Tick curTime);
     int getPriority() { return m_priority_rank; }
     void setPriority(int rank) { m_priority_rank = rank; }
     void setConsumer(Consumer* consumer)
@@ -86,20 +86,6 @@ class MessageBuffer : public SimObject
         m_consumer = consumer;
     }
 
-    void setSender(ClockedObject* obj)
-    {
-        DPRINTF(RubyQueue, "Setting sender: %s\n", obj->name());
-        assert(m_sender == NULL || m_sender == obj);
-        m_sender = obj;
-    }
-
-    void setReceiver(ClockedObject* obj)
-    {
-        DPRINTF(RubyQueue, "Setting receiver: %s\n", obj->name());
-        assert(m_receiver == NULL || m_receiver == obj);
-        m_receiver = obj;
-    }
-
     Consumer* getConsumer() { return m_consumer; }
 
     bool getOrdered() { return m_strict_fifo; }
@@ -108,26 +94,20 @@ class MessageBuffer : public SimObject
     //! message queue.  The function assumes that the queue is nonempty.
     const Message* peek() const;
 
-    const MsgPtr&
-    peekMsgPtr() const
-    {
-        assert(isReady());
-        return m_prio_heap.front();
-    }
+    const MsgPtr &peekMsgPtr() const { return m_prio_heap.front(); }
 
-    void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
-    void enqueue(MsgPtr message, Cycles delta);
+    void enqueue(MsgPtr message, Tick curTime, Tick delta);
 
     //! Updates the delay cycles of the message at the head of the queue,
     //! removes it from the queue and returns its total delay.
-    Cycles dequeue();
+    Tick dequeue(Tick current_time);
 
-    void recycle();
+    void recycle(Tick current_time, Tick recycle_latency);
     bool isEmpty() const { return m_prio_heap.size() == 0; }
     bool isStallMapEmpty() { return m_stall_msg_map.size() == 0; }
     unsigned int getStallMapSize() { return m_stall_msg_map.size(); }
 
-    unsigned int getSize();
+    unsigned int getSize(Tick curTime);
 
     void clear();
     void print(std::ostream& out) const;
@@ -148,17 +128,10 @@ class MessageBuffer : public SimObject
     uint32_t functionalWrite(Packet *pkt);
 
   private:
-    //added by SS
-    const Cycles m_recycle_latency;
-
     void reanalyzeList(std::list<MsgPtr> &, Tick);
 
   private:
     // Data Members (m_ prefix)
-    //! The two ends of the buffer.
-    ClockedObject* m_sender;
-    ClockedObject* m_receiver;
-
     //! Consumer to signal a wakeup(), can be NULL
     Consumer* m_consumer;
     std::vector<MsgPtr> m_prio_heap;
@@ -170,12 +143,12 @@ class MessageBuffer : public SimObject
     StallMsgMapType m_stall_msg_map;
 
     const unsigned int m_max_size;
-    Cycles m_time_last_time_size_checked;
+    Tick m_time_last_time_size_checked;
     unsigned int m_size_last_time_size_checked;
 
     // variables used so enqueues appear to happen immediately, while
     // pop happen the next cycle
-    Cycles m_time_last_time_enqueue;
+    Tick m_time_last_time_enqueue;
     Tick m_time_last_time_pop;
     Tick m_last_arrival_time;
 
@@ -193,7 +166,7 @@ class MessageBuffer : public SimObject
     int m_vnet_id;
 };
 
-Cycles random_time();
+Tick random_time();
 
 inline std::ostream&
 operator<<(std::ostream& out, const MessageBuffer& obj)
index 88c528e30f0121b9868d6faf26aa56a511c81fc6..d8a028532ca039d63c09f2d4e3d8999072eee1bc 100644 (file)
@@ -37,7 +37,6 @@ class MessageBuffer(SimObject):
     ordered = Param.Bool(False, "Whether the buffer is ordered")
     buffer_size = Param.Unsigned(0, "Maximum number of entries to buffer \
                                      (0 allows infinite entries)")
-    recycle_latency = Param.Cycles(Parent.recycle_latency, "")
     randomization = Param.Bool(False, "")
 
     master = MasterPort("Master port to MessageBuffer receiver")
index c7bd6178a4b40ed7693820e2e1397c81f4c7f70a..e350eba6bfcdbdbc7893a415be64dba958df25a4 100644 (file)
@@ -115,13 +115,6 @@ NetworkInterface_d::addNode(vector<MessageBuffer *>& in,
     for (auto& it : in) {
         if (it != nullptr) {
             it->setConsumer(this);
-            it->setReceiver(this);
-        }
-    }
-
-    for (auto& it : out) {
-        if (it != nullptr) {
-            it->setSender(this);
         }
     }
 }
@@ -222,6 +215,7 @@ NetworkInterface_d::wakeup()
     DPRINTF(RubyNetwork, "m_id: %d woke up at time: %lld", m_id, curCycle());
 
     MsgPtr msg_ptr;
+    Tick curTime = clockEdge();
 
     // Checking for messages coming from the protocol
     // can pick up a message/cycle for each virtual net
@@ -231,10 +225,10 @@ NetworkInterface_d::wakeup()
             continue;
         }
 
-        while (b->isReady()) { // Is there a message waiting
+        while (b->isReady(curTime)) { // Is there a message waiting
             msg_ptr = b->peekMsgPtr();
             if (flitisizeMessage(msg_ptr, vnet)) {
-                b->dequeue();
+                b->dequeue(curTime);
             } else {
                 break;
             }
@@ -253,7 +247,7 @@ NetworkInterface_d::wakeup()
             free_signal = true;
 
             outNode_ptr[t_flit->get_vnet()]->enqueue(
-                t_flit->get_msg_ptr(), Cycles(1));
+                t_flit->get_msg_ptr(), curTime, cyclesToTicks(Cycles(1)));
         }
         // Simply send a credit back since we are not buffering
         // this flit in the NI
@@ -363,7 +357,7 @@ NetworkInterface_d::checkReschedule()
             continue;
         }
 
-        while (it->isReady()) { // Is there a message waiting
+        while (it->isReady(clockEdge())) { // Is there a message waiting
             scheduleEvent(Cycles(1));
             return;
         }
index d834ea1a355d80f3fa7cdee3604ed900ede2e9ef..3d75ef8c2b739c07ad70501e3897be2ce4280242 100644 (file)
@@ -99,13 +99,6 @@ NetworkInterface::addNode(vector<MessageBuffer*>& in,
     for (auto& it: in) {
         if (it != nullptr) {
             it->setConsumer(this);
-            it->setReceiver(this);
-        }
-    }
-
-    for (auto& it : out) {
-        if (it != nullptr) {
-            it->setSender(this);
         }
     }
 }
@@ -250,10 +243,10 @@ NetworkInterface::wakeup()
             continue;
         }
 
-        while (b->isReady()) { // Is there a message waiting
+        while (b->isReady(clockEdge())) { // Is there a message waiting
             msg_ptr = b->peekMsgPtr();
             if (flitisizeMessage(msg_ptr, vnet)) {
-                b->dequeue();
+                b->dequeue(clockEdge());
             } else {
                 break;
             }
@@ -272,7 +265,7 @@ NetworkInterface::wakeup()
                     m_id, curCycle());
 
             outNode_ptr[t_flit->get_vnet()]->enqueue(
-                t_flit->get_msg_ptr(), Cycles(1));
+                t_flit->get_msg_ptr(), clockEdge(), cyclesToTicks(Cycles(1)));
 
             // signal the upstream router that this vc can be freed now
             inNetLink->release_vc_link(t_flit->get_vc(),
@@ -334,7 +327,7 @@ NetworkInterface::checkReschedule()
             continue;
         }
 
-        while (it->isReady()) { // Is there a message waiting
+        while (it->isReady(clockEdge())) { // Is there a message waiting
             scheduleEvent(Cycles(1));
             return;
         }
index 697357ccb86c7ee2730885b82316f253b37c02b8..301d453c52472ca590e8ae486b9ccf63e42ad75e 100644 (file)
@@ -144,8 +144,9 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
     // temporary vectors to store the routing results
     vector<LinkID> output_links;
     vector<NetDest> output_link_destinations;
+    Tick current_time = m_switch->clockEdge();
 
-    while (buffer->isReady()) {
+    while (buffer->isReady(current_time)) {
         DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
 
         // Peek at message
@@ -176,7 +177,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
                 for (int out = 0; out < m_out.size(); out++) {
                     int out_queue_length = 0;
                     for (int v = 0; v < m_virtual_networks; v++) {
-                        out_queue_length += m_out[out][v]->getSize();
+                        out_queue_length += m_out[out][v]->getSize(current_time);
                     }
                     int value =
                         (out_queue_length << 8) |
@@ -220,7 +221,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
         for (int i = 0; i < output_links.size(); i++) {
             int outgoing = output_links[i];
 
-            if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+            if (!m_out[outgoing][vnet]->areNSlotsAvailable(1, current_time))
                 enough = false;
 
             DPRINTF(RubyNetwork, "Checking if node is blocked ..."
@@ -251,7 +252,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
         }
 
         // Dequeue msg
-        buffer->dequeue();
+        buffer->dequeue(current_time);
         m_pending_message_count[vnet]--;
 
         // Enqueue it - for all outgoing queues
@@ -273,7 +274,8 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
                     "inport[%d][%d] to outport [%d][%d].\n",
                     incoming, vnet, outgoing, vnet);
 
-            m_out[outgoing][vnet]->enqueue(msg_ptr);
+            m_out[outgoing][vnet]->enqueue(msg_ptr, current_time,
+                                           m_switch->cyclesToTicks(Cycles(1)));
         }
     }
 }
index f4ec440a3a528b3b1cb0becdd80717b8e7b4a942..87de0fb46fd58fe814d2904777138d5132a7fa2b 100644 (file)
@@ -41,9 +41,6 @@ class SimpleNetwork(RubyNetwork):
     endpoint_bandwidth = Param.Int(1000, "bandwidth adjustment factor");
     adaptive_routing = Param.Bool(False, "enable adaptive routing");
     int_link_buffers = VectorParam.MessageBuffer("Buffers for int_links")
-    # int_links do not recycle buffers, so this parameter is not used.
-    # TODO: Move recycle_latency out of MessageBuffers and into controllers
-    recycle_latency = Param.Cycles(0, "")
 
     def setup_buffers(self):
         # Note that all SimpleNetwork MessageBuffers are currently ordered
@@ -82,6 +79,3 @@ class Switch(BasicRouter):
     virt_nets = Param.Int(Parent.number_of_virtual_networks,
                           "number of virtual networks")
     port_buffers = VectorParam.MessageBuffer("Port buffers")
-    # Ports do not recycle buffers, so this parameter is not used.
-    # TODO: Move recycle_latency out of MessageBuffers and into controllers
-    recycle_latency = Param.Cycles(0, "")
index b9d0b80103bfd1b1d95ab606964f1383cfe3f9ae..0951ef13846c1b2e2333b5cc8acc7885bda8df14 100644 (file)
@@ -69,12 +69,6 @@ void
 Switch::addInPort(const vector<MessageBuffer*>& in)
 {
     m_perfect_switch->addInPort(in);
-
-    for (auto& it : in) {
-        if (it != nullptr) {
-            it->setReceiver(this);
-        }
-    }
 }
 
 void
@@ -95,17 +89,10 @@ Switch::addOutPort(const vector<MessageBuffer*>& out,
     vector<MessageBuffer*> intermediateBuffers;
 
     for (int i = 0; i < out.size(); ++i) {
-        if (out[i] != nullptr) {
-            out[i]->setSender(this);
-        }
-
         assert(m_num_connected_buffers < m_port_buffers.size());
         MessageBuffer* buffer_ptr = m_port_buffers[m_num_connected_buffers];
         m_num_connected_buffers++;
         intermediateBuffers.push_back(buffer_ptr);
-
-        buffer_ptr->setSender(this);
-        buffer_ptr->setReceiver(this);
     }
 
     // Hook the queues to the PerfectSwitch
index 01d1f6fbe407436b7cba0ff5e595a800fbd1a6eb..3863ab944ea617ad2463d3da16a43254f6e8d5bd 100644 (file)
@@ -94,14 +94,16 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
     if (out == nullptr || in == nullptr) {
         return;
     }
-    assert(m_units_remaining[vnet] >= 0);
 
-    while (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
-                                out->areNSlotsAvailable(1)) {
+    assert(m_units_remaining[vnet] >= 0);
+    Tick current_time = m_switch->clockEdge();
 
+    while (bw_remaining > 0 && (in->isReady(current_time) ||
+                                m_units_remaining[vnet] > 0) &&
+           out->areNSlotsAvailable(1, current_time)) {
         // See if we are done transferring the previous message on
         // this virtual network
-        if (m_units_remaining[vnet] == 0 && in->isReady()) {
+        if (m_units_remaining[vnet] == 0 && in->isReady(current_time)) {
             // Find the size of the message we are moving
             MsgPtr msg_ptr = in->peekMsgPtr();
             Message *net_msg_ptr = msg_ptr.get();
@@ -114,8 +116,9 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
                     m_ruby_system->curCycle());
 
             // Move the message
-            in->dequeue();
-            out->enqueue(msg_ptr, m_link_latency);
+            in->dequeue(current_time);
+            out->enqueue(msg_ptr, current_time,
+                         m_switch->cyclesToTicks(m_link_latency));
 
             // Count the message
             m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
@@ -128,8 +131,9 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
         bw_remaining = max(0, -diff);
     }
 
-    if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
-                             !out->areNSlotsAvailable(1)) {
+    if (bw_remaining > 0 && (in->isReady(current_time) ||
+                             m_units_remaining[vnet] > 0) &&
+        !out->areNSlotsAvailable(1, current_time)) {
         DPRINTF(RubyNetwork, "vnet: %d", vnet);
 
         // schedule me to wakeup again because I'm waiting for my
index 5bd38195a5e9f4bc07f63cccfef50394c7d017b1..be6438711a7b1192286afa080d0c094f27ad9fa3 100644 (file)
@@ -60,9 +60,6 @@ AbstractController::init()
         m_delayVCHistogram.push_back(new Stats::Histogram());
         m_delayVCHistogram[i]->init(10);
     }
-    if (getMemoryQueue()) {
-        getMemoryQueue()->setSender(this);
-    }
 }
 
 void
@@ -118,7 +115,8 @@ AbstractController::wakeUpBuffers(Addr addr)
              in_port_rank >= 0;
              in_port_rank--) {
             if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
-                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
+                (*(m_waiting_buffers[addr]))[in_port_rank]->
+                    reanalyzeMessages(addr, clockEdge());
             }
         }
         delete m_waiting_buffers[addr];
@@ -138,7 +136,8 @@ AbstractController::wakeUpAllBuffers(Addr addr)
              in_port_rank >= 0;
              in_port_rank--) {
             if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
-                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
+                (*(m_waiting_buffers[addr]))[in_port_rank]->
+                    reanalyzeMessages(addr, clockEdge());
             }
         }
         delete m_waiting_buffers[addr];
@@ -168,7 +167,7 @@ AbstractController::wakeUpAllBuffers()
                   //
                   if (*vec_iter != NULL &&
                       (wokeUpMsgBufs.count(*vec_iter) == 0)) {
-                      (*vec_iter)->reanalyzeAllMessages();
+                      (*vec_iter)->reanalyzeAllMessages(clockEdge());
                       wokeUpMsgBufs.insert(*vec_iter);
                   }
              }
@@ -328,7 +327,7 @@ AbstractController::recvTimingResp(PacketPtr pkt)
         panic("Incorrect packet type received from memory controller!");
     }
 
-    getMemoryQueue()->enqueue(msg);
+    getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
     delete pkt;
 }
 
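
The converted call sites above lean on two helpers that controllers already inherit from gem5's ClockedObject; the snippet below only spells out the relationship the patch assumes, mirroring the enqueue call a few lines up (illustrative, not part of the patch):

    // clockEdge()              -> current time, snapped to a clock edge (Ticks)
    // cyclesToTicks(Cycles c)  -> c * clockPeriod(), a duration in Ticks
    Tick now     = clockEdge();
    Tick one_cyc = cyclesToTicks(Cycles(1));
    getMemoryQueue()->enqueue(msg, now, one_cyc);  // equivalent to the call above
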
index cbc51dae575745b7c675afd99b0467b15ded19c9..4a24a5b134eecf750970e2d6d46b519188f819d6 100644 (file)
@@ -47,12 +47,12 @@ class TBETable
     void allocate(Addr address);
     void deallocate(Addr address);
     bool
-    areNSlotsAvailable(int n) const
+    areNSlotsAvailable(int n, Tick current_time) const
     {
         return (m_number_of_TBEs - m_map.size()) >= n;
     }
 
-    ENTRY* lookup(Addr address);
+    ENTRY *lookup(Addr address);
 
     // Print cache contents
     void print(std::ostream& out) const;
index 17dac6fc0c9fc0d7f5aebaf6497f28389d42e819..4809c8a476db346cf699c11f17353984d0ec57d0 100644 (file)
@@ -34,14 +34,12 @@ TimerTable::TimerTable()
     : m_next_time(0)
 {
     m_consumer_ptr  = NULL;
-    m_clockobj_ptr = NULL;
-
     m_next_valid = false;
     m_next_address = 0;
 }
 
 bool
-TimerTable::isReady() const
+TimerTable::isReady(Tick curTime) const
 {
     if (m_map.empty())
         return false;
@@ -50,14 +48,12 @@ TimerTable::isReady() const
         updateNext();
     }
     assert(m_next_valid);
-    return (m_clockobj_ptr->curCycle() >= m_next_time);
+    return (curTime >= m_next_time);
 }
 
 Addr
-TimerTable::readyAddress() const
+TimerTable::nextAddress() const
 {
-    assert(isReady());
-
     if (!m_next_valid) {
         updateNext();
     }
@@ -66,17 +62,14 @@ TimerTable::readyAddress() const
 }
 
 void
-TimerTable::set(Addr address, Cycles relative_latency)
+TimerTable::set(Addr address, Tick ready_time)
 {
     assert(address == makeLineAddress(address));
-    assert(relative_latency > 0);
     assert(!m_map.count(address));
 
-    Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency;
     m_map[address] = ready_time;
     assert(m_consumer_ptr != NULL);
-    m_consumer_ptr->
-        scheduleEventAbsolute(m_clockobj_ptr->clockPeriod() * ready_time);
+    m_consumer_ptr->scheduleEventAbsolute(ready_time);
     m_next_valid = false;
 
     // Don't always recalculate the next ready address
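
Since set() now stores an absolute ready time and wakes the consumer at exactly that tick, adding the current time to the timeout is the caller's job. A hypothetical caller under the new interface; the 10-cycle timeout, line_addr and m_timerTable are placeholder names:

    // Arm a timer: absolute deadline = now + timeout, both in Ticks.
    Tick deadline = clockEdge() + cyclesToTicks(Cycles(10));
    m_timerTable.set(line_addr, deadline);

    // Later, typically from the timer in_port:
    if (m_timerTable.isReady(clockEdge())) {
        Addr expired = m_timerTable.nextAddress();  // renamed from readyAddress()
        // ... trigger the timeout event for 'expired' ...
    }
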
index 606201eb4369fd0966cf128d6ff47e8d89280855..9efe7ca048e21a32604a7734d510eeb1a41f4128 100644 (file)
@@ -49,25 +49,16 @@ class TimerTable
         m_consumer_ptr = consumer_ptr;
     }
 
-    void setClockObj(ClockedObject* obj)
-    {
-        assert(m_clockobj_ptr == NULL);
-        m_clockobj_ptr = obj;
-    }
-
     void
     setDescription(const std::string& name)
     {
         m_name = name;
     }
 
-    bool isReady() const;
-    Addr readyAddress() const;
+    bool isReady(Tick curTime) const;
+    Addr nextAddress() const;
     bool isSet(Addr address) const { return !!m_map.count(address); }
-    void set(Addr address, Cycles relative_latency);
-    void set(Addr address, uint64_t relative_latency)
-    { set(address, Cycles(relative_latency)); }
-
+    void set(Addr address, Tick ready_time);
     void unset(Addr address);
     void print(std::ostream& out) const;
 
@@ -82,14 +73,12 @@ class TimerTable
 
     // use a std::map for the address map as this container is sorted
     // and ensures a well-defined iteration order
-    typedef std::map<Addr, Cycles> AddressMap;
+    typedef std::map<Addr, Tick> AddressMap;
     AddressMap m_map;
     mutable bool m_next_valid;
-    mutable Cycles m_next_time; // Only valid if m_next_valid is true
+    mutable Tick m_next_time; // Only valid if m_next_valid is true
     mutable Addr m_next_address;  // Only valid if m_next_valid is true
 
-    //! Object used for querying time.
-    ClockedObject* m_clockobj_ptr;
     //! Consumer to signal a wakeup()
     Consumer* m_consumer_ptr;
 
index 85b476cfdcb8de73f7be3a9d1f52175ead8cccc5..3c895e62726fe4b29ddd2a76e3041851a892c682 100644 (file)
@@ -54,7 +54,6 @@ DMASequencer::init()
     MemObject::init();
     assert(m_controller != NULL);
     m_mandatory_q_ptr = m_controller->getMandatoryQueue();
-    m_mandatory_q_ptr->setSender(this);
     m_is_busy = false;
     m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
 
@@ -256,7 +255,7 @@ DMASequencer::makeRequest(PacketPtr pkt)
     }
 
     assert(m_mandatory_q_ptr != NULL);
-    m_mandatory_q_ptr->enqueue(msg);
+    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
     active_request.bytes_issued += msg->getLen();
 
     return RequestStatus_Issued;
@@ -302,7 +301,7 @@ DMASequencer::issueNext()
     }
 
     assert(m_mandatory_q_ptr != NULL);
-    m_mandatory_q_ptr->enqueue(msg);
+    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
     active_request.bytes_issued += msg->getLen();
     DPRINTF(RubyDma,
             "DMA request bytes issued %d, bytes completed %d, total len %d\n",
index c13aed97e8c948c50f7155405f733a3a157cb23e..e03d23774c381b0d6f8f5b2570e7167a37027177 100644 (file)
@@ -81,7 +81,6 @@ RubyPort::init()
 {
     assert(m_controller != NULL);
     m_mandatory_q_ptr = m_controller->getMandatoryQueue();
-    m_mandatory_q_ptr->setSender(this);
 }
 
 BaseMasterPort &
index 186e62d55ee97ccb00475b4b1b4f5fad41d9b3a4..815e270b6022a2c2f7960290c32db9e4017b64ff 100644 (file)
@@ -629,7 +629,7 @@ Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
     assert(latency > 0);
 
     assert(m_mandatory_q_ptr != NULL);
-    m_mandatory_q_ptr->enqueue(msg, latency);
+    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
 }
 
 template <class KEY, class VALUE>
index 930540494fa1111b3ac8726a5f59ee169df9f7ff..556643e4eba22f31bd5fb28b7a663456c630d233 100644 (file)
@@ -65,9 +65,10 @@ class EnqueueStatementAST(StatementAST):
         if self.latexpr != None:
             ret_type, rcode = self.latexpr.inline(True)
             code("(${{self.queue_name.var.code}}).enqueue(" \
-                 "out_msg, Cycles($rcode));")
+                 "out_msg, clockEdge(), cyclesToTicks(Cycles($rcode)));")
         else:
-            code("(${{self.queue_name.var.code}}).enqueue(out_msg);")
+            code("(${{self.queue_name.var.code}}).enqueue(out_msg, "\
+                 "clockEdge(), cyclesToTicks(Cycles(1)));")
 
         # End scope
         self.symtab.popFrame()
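
As a concrete illustration of what this template now emits, a SLICC enqueue with an explicit latency becomes a three-argument call, and the latency-free form defaults to one cycle. The SLICC statements and the expanded C++ identifiers below are hypothetical examples, not taken from a particular protocol:

    // SLICC source (hypothetical):
    //   enqueue(requestNetwork_out, RequestMsg, request_latency) { ... }
    // Generated C++ per the template above:
    (*m_requestNetwork_out_ptr).enqueue(
        out_msg, clockEdge(), cyclesToTicks(Cycles(m_request_latency)));

    //   enqueue(responseNetwork_out, ResponseMsg) { ... }
    (*m_responseNetwork_out_ptr).enqueue(
        out_msg, clockEdge(), cyclesToTicks(Cycles(1)));
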
index 7cea70b32293a892034c9450815b4158aefce620..efc7ef928099c51339173d1df9bc4f3b975a7c32 100644 (file)
@@ -55,6 +55,8 @@ class ObjDeclAST(DeclAST):
             c_code = "m_machineID"
         elif self.ident == "clusterID":
             c_code = "m_clusterID"
+        elif self.ident == "recycle_latency":
+            c_code = "m_recycle_latency"
         else:
             c_code = "(*m_%s_ptr)" % (self.ident)
 
index f5ef91daf249e3328bfdd56c4798dcd4fd287719..00d26e908907ce0673591fb476b40f55122f8e0b 100644 (file)
@@ -77,7 +77,7 @@ class PeekStatementAST(StatementAST):
     if (m_is_blocking &&
         (m_block_map.count(in_msg_ptr->m_$address_field) == 1) &&
         (m_block_map[in_msg_ptr->m_$address_field] != &$qcode)) {
-            $qcode.delayHead();
+            $qcode.delayHead(clockEdge(), cyclesToTicks(Cycles(1)));
             continue;
     }
             ''')
index b2f6228719061e629be822c1d5cc18fb1222ea90..6ab2888b7e76faf1b40371d71065498eceb7b108 100644 (file)
@@ -45,5 +45,5 @@ class StallAndWaitStatementAST(StatementAST):
         address_code = self.address.var.code
         code('''
         stallBuffer(&($in_port_code), $address_code);
-        $in_port_code.stallMessage($address_code);
+        $in_port_code.stallMessage($address_code, clockEdge());
         ''')
index 42a81c0960ffe4aea98d85ee89f9dd05f397cba6..015d902b415b224360abeb0de31358e0ac3b8d2f 100644 (file)
@@ -580,24 +580,10 @@ $c_ident::initNetQueues()
 m_net_ptr->set${network}NetQueue(m_version + base, $vid->getOrdered(), $vnet,
                                  "$vnet_type", $vid);
 ''')
-                # Set the end
-                if network == "To":
-                    code('$vid->setSender(this);')
-                else:
-                    code('$vid->setReceiver(this);')
-
                 # Set Priority
                 if "rank" in var:
                     code('$vid->setPriority(${{var["rank"]}})')
 
-            else:
-                if var.type_ast.type.c_ident == "MessageBuffer":
-                    code('$vid->setReceiver(this);')
-                if var.ident.find("triggerQueue") >= 0:
-                    code('$vid->setSender(this);')
-                elif var.ident.find("optionalQueue") >= 0:
-                    code('$vid->setSender(this);')
-
         code.dedent()
         code('''
 }
@@ -637,9 +623,6 @@ $c_ident::init()
                         comment = "Type %s default" % vtype.ident
                         code('*$vid = ${{vtype["default"]}}; // $comment')
 
-                    if vtype.c_ident == "TimerTable":
-                        code('$vid->setClockObj(this);')
-
         # Set the prefetchers
         code()
         for prefetcher in self.prefetchers:
@@ -1293,7 +1276,7 @@ ${ident}_Controller::doTransitionWorker(${ident}_Event event,
             res = trans.resources
             for key,val in res.iteritems():
                 val = '''
-if (!%s.areNSlotsAvailable(%s))
+if (!%s.areNSlotsAvailable(%s, clockEdge()))
     return TransitionResult_ResourceStall;
 ''' % (key.code, val)
                 case_sorter.append(val)
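
Expanded for one transition resource, the check generated by this template reads roughly as follows; the buffer name is a placeholder:

    // Emitted once per (buffer, slots) resource pair of a transition:
    if (!(*m_responseNetwork_out_ptr).areNSlotsAvailable(1, clockEdge()))
        return TransitionResult_ResourceStall;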