Ruby: Remove CacheMsg class from SLICC
author Nilay Vaish <nilay@cs.wisc.edu>
Tue, 22 Mar 2011 11:41:54 +0000 (06:41 -0500)
committer Nilay Vaish <nilay@cs.wisc.edu>
Tue, 22 Mar 2011 11:41:54 +0000 (06:41 -0500)
This patch does away with the CacheMsg class currently used by the coherence
protocols. The RubyRequest class, already present in
slicc_interface/RubyRequest.hh, is used in its place. Until now, CacheMsg
objects were only ever created by copying values out of a RubyRequest object,
so the separate class (and the extra copy) was redundant.
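
In outline (a condensed sketch drawn from the hunks below; controller names and
queue parameters vary by protocol), every SLICC in_port on the mandatory queue
now peeks RubyRequest directly:

-  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {

and Sequencer::issueRequest enqueues a RubyRequest instead of building a
CacheMsg copy of one:

-    CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype,
-        Address(request.pc), amtype, request.len, PrefetchBit_No,
-        request.proc_id);
+    RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
+                                       request.data, request.m_Size,
+                                       request.m_ProgramCounter.getAddress(),
+                                       ctype, amtype, request.pkt,
+                                       PrefetchBit_No, request.proc_id);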

23 files changed:
src/mem/protocol/MESI_CMP_directory-L1cache.sm
src/mem/protocol/MI_example-cache.sm
src/mem/protocol/MOESI_CMP_directory-L1cache.sm
src/mem/protocol/MOESI_CMP_token-L1cache.sm
src/mem/protocol/MOESI_hammer-cache.sm
src/mem/protocol/Network_test-cache.sm
src/mem/protocol/RubySlicc_Exports.sm
src/mem/protocol/RubySlicc_Profiler.sm
src/mem/protocol/RubySlicc_Types.sm
src/mem/ruby/profiler/AddressProfiler.cc
src/mem/ruby/profiler/AddressProfiler.hh
src/mem/ruby/profiler/Profiler.cc
src/mem/ruby/profiler/Profiler.hh
src/mem/ruby/recorder/TraceRecord.cc
src/mem/ruby/slicc_interface/RubyRequest.cc
src/mem/ruby/slicc_interface/RubyRequest.hh
src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh
src/mem/ruby/slicc_interface/RubySlicc_Util.hh
src/mem/ruby/system/CacheMemory.cc
src/mem/ruby/system/CacheMemory.hh
src/mem/ruby/system/DMASequencer.cc
src/mem/ruby/system/RubyPort.cc
src/mem/ruby/system/Sequencer.cc

index 705b2831c4e8d675051aa86fe9e546c871c06415..26f5b1ff6db1f27248502c53162df4a522c9c17f 100644 (file)
@@ -267,9 +267,9 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
   }
 
   // Mandatory Queue betweens Node's CPU and it's L1 caches
-  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
     if (mandatoryQueue_in.isReady()) {
-      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
 
@@ -338,7 +338,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
 
   // ACTIONS
   action(a_issueGETS, "a", desc="Issue GETS") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:GETS;
@@ -355,7 +355,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
   }
 
   action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:GET_INSTR;
@@ -373,7 +373,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
 
 
   action(b_issueGETX, "b", desc="Issue GETX") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:GETX;
@@ -391,7 +391,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
   }
 
   action(c_issueUPGRADE, "c", desc="Issue GETX") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       enqueue(requestIntraChipL1Network_out, RequestMsg, latency= l1_request_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:UPGRADE;
index 7923ef65c2e5a4b64a186160bdb64bda47ee8bff..7adadbade45e8c7a86067b17448db7b1c41d15f3 100644 (file)
@@ -181,9 +181,9 @@ machine(L1Cache, "MI Example L1 Cache")
   }
 
     // Mandatory Queue
-  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
     if (mandatoryQueue_in.isReady()) {
-      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         Entry cache_entry := getCacheEntry(in_msg.LineAddress);
         if (is_invalid(cache_entry) &&
@@ -281,7 +281,7 @@ machine(L1Cache, "MI Example L1 Cache")
   }
 
   action(p_profileMiss, "p", desc="Profile cache miss") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       cacheMemory.profileMiss(in_msg);
     }
   }
index 291621af91d4da9eedfa9485f5bc4b494716e7ab..50bb710cb1d14841732e4b5f804cbda394a80f07 100644 (file)
@@ -303,9 +303,9 @@ machine(L1Cache, "Directory protocol")
 
   // Nothing from the unblock network
   // Mandatory Queue betweens Node's CPU and it's L1 caches
-  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
     if (mandatoryQueue_in.isReady()) {
-      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
 
@@ -380,7 +380,7 @@ machine(L1Cache, "Directory protocol")
   // ACTIONS
 
   action(a_issueGETS, "a", desc="Issue GETS") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       enqueue(requestNetwork_out, RequestMsg, latency= request_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:GETS;
@@ -396,7 +396,7 @@ machine(L1Cache, "Directory protocol")
   }
 
   action(b_issueGETX, "b", desc="Issue GETX") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:GETX;
@@ -820,7 +820,7 @@ machine(L1Cache, "Directory protocol")
 
 
   action(uu_profileMiss, "\u", desc="Profile the demand miss") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       //      profile_miss(in_msg);
     }
   }
index 8537029e77ee4b90ac0e9b11c09be1c6f0cb2c89..d7344d779037b91f7d4055b1000e3f120519602a 100644 (file)
@@ -622,9 +622,9 @@ machine(L1Cache, "Token protocol")
   }
 
   // Mandatory Queue
-  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
+  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
     if (mandatoryQueue_in.isReady()) {
-      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
 
         TBE tbe := L1_TBEs[in_msg.LineAddress];
@@ -1310,7 +1310,7 @@ machine(L1Cache, "Token protocol")
     L1_TBEs.allocate(address);
     set_tbe(L1_TBEs[address]);
     tbe.IssueCount := 0;
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       tbe.PC := in_msg.ProgramCounter;
       tbe.AccessType := cache_request_type_to_access_type(in_msg.Type);
       if (in_msg.Type == RubyRequestType:ATOMIC) {
@@ -1323,7 +1323,7 @@ machine(L1Cache, "Token protocol")
   }
 
   action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
     }
   }
@@ -1499,7 +1499,7 @@ machine(L1Cache, "Token protocol")
   }
 
   action(uu_profileMiss, "\u", desc="Profile the demand miss") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       if (L1DcacheMemory.isTagPresent(address)) {
         L1DcacheMemory.profileMiss(in_msg);
       } else {
@@ -1516,7 +1516,7 @@ machine(L1Cache, "Token protocol")
   }
 
   action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
     } 
     stall_and_wait(mandatoryQueue_in, address);    
index ea2a1d6e350fd78249132dc6282bc58cbb3f17a8..865acf27523b77e445fd1a5466e33c10f708535b 100644 (file)
@@ -352,9 +352,9 @@ machine(L1Cache, "AMD Hammer-like protocol")
   // Nothing from the request network
 
   // Mandatory Queue
-  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
+  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
     if (mandatoryQueue_in.isReady()) {
-      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
 
         // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
         TBE tbe := TBEs[in_msg.LineAddress];
@@ -695,7 +695,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
   action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
     assert(is_valid(cache_entry));
     DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                               cache_entry.DataBlk);
 
@@ -1022,7 +1022,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
   }
 
   action(uu_profileMiss, "\u", desc="Profile the demand miss") {
-    peek(mandatoryQueue_in, CacheMsg) {
+    peek(mandatoryQueue_in, RubyRequest) {
       if (L1IcacheMemory.isTagPresent(address)) {
         L1IcacheMemory.profileMiss(in_msg);
       } else if (L1DcacheMemory.isTagPresent(address)) {
index 603c1f5f96906fab58d3c7be340f800644509201..814cd5c29bd612c8253c74fe6468b1f3b788b98b 100644 (file)
@@ -132,9 +132,9 @@ machine(L1Cache, "Network_test L1 Cache")
   out_port(responseNetwork_out, RequestMsg, responseFromCache);
 
   // Mandatory Queue
-  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
     if (mandatoryQueue_in.isReady()) {
-      peek(mandatoryQueue_in, CacheMsg) {
+      peek(mandatoryQueue_in, RubyRequest) {
         trigger(mandatory_request_type_to_event(in_msg.Type),
                 in_msg.LineAddress,
                 getCacheEntry(in_msg.LineAddress),
index 1f7a1dda24462930bd0a4667ecbbd703172c1ac9..0ef3df29b76440b794fc2a88388811f7aa118385 100644 (file)
@@ -213,17 +213,6 @@ enumeration(PrefetchBit, default="PrefetchBit_No", desc="...") {
   L2_HW, desc="This is a L2 hardware prefetch";
 }
 
-// CacheMsg
-structure(CacheMsg, desc="...", interface="Message") {
-  Address LineAddress,       desc="Line address for this request";
-  Address PhysicalAddress,   desc="Physical address for this request";
-  RubyRequestType Type,      desc="Type of request (LD, ST, etc)";
-  Address ProgramCounter,    desc="Program counter of the instruction that caused the miss";
-  RubyAccessMode AccessMode, desc="user/supervisor access type";
-  int Size,                  desc="size in bytes of access";
-  PrefetchBit Prefetch,      desc="Is this a prefetch request";
-}
-
 // CacheMsg
 structure(SequencerMsg, desc="...", interface="Message") {
   Address LineAddress,       desc="Line address for this request";
index ed6b10d8eb18c51f84b5e7b51e1531af859fddbe..773bf0025ce16f12c9af61949b541a9cbfb93247 100644 (file)
@@ -34,10 +34,10 @@ void profileCacheCLBsize(int size, int numStaleI);
 void profileMemoryCLBsize(int size, int numStaleI);
 
 // used by 2level exclusive cache protocols
-void profile_miss(CacheMsg msg);
+void profile_miss(RubyRequest msg);
 
 // used by non-fast path protocols
-void profile_L1Cache_miss(CacheMsg msg, NodeID l1cacheID);
+void profile_L1Cache_miss(RubyRequest msg, NodeID l1cacheID);
 
 // used by CMP protocols
 void profile_request(std::string L1CacheStateStr, std::string L2CacheStateStr,
index d9c3077a2ad4cacee14e07503c36e3798029b3eb..118cbc2f0430c2caa8746ea93e15c6ee50a88814 100644 (file)
@@ -109,6 +109,16 @@ structure (Sequencer, external = "yes") {
   void profileNack(Address, int, int, uint64);
 }
 
+structure(RubyRequest, desc="...", interface="Message", external="yes") {
+  Address LineAddress,       desc="Line address for this request";
+  Address PhysicalAddress,   desc="Physical address for this request";
+  RubyRequestType Type,      desc="Type of request (LD, ST, etc)";
+  Address ProgramCounter,    desc="Program counter of the instruction that caused the miss";
+  RubyAccessMode AccessMode, desc="user/supervisor access type";
+  int Size,                  desc="size in bytes of access";
+  PrefetchBit Prefetch,      desc="Is this a prefetch request";
+}
+
 external_type(AbstractEntry, primitive="yes");
 
 structure (DirectoryMemory, external = "yes") {
@@ -126,7 +136,7 @@ structure (CacheMemory, external = "yes") {
   void deallocate(Address);
   AbstractCacheEntry lookup(Address);
   bool isTagPresent(Address);
-  void profileMiss(CacheMsg);
+  void profileMiss(RubyRequest);
 
   void profileGenericRequest(GenericRequestType,
                              RubyAccessMode,
index 722845c450ebccde15d1af765014c0da261280bd..a9c9a959100432ae7c9c3bbdcfa16702523749c1 100644 (file)
@@ -29,7 +29,7 @@
 #include <vector>
 
 #include "base/stl_helpers.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
 #include "mem/ruby/profiler/AddressProfiler.hh"
 #include "mem/ruby/profiler/Profiler.hh"
 #include "mem/ruby/system/System.hh"
index 471feaaa5a0a9d91e8632d73766a6ca914fc1c02..e525a792fd5793df8a1e7b72bb915d96860cb75f 100644 (file)
@@ -33,7 +33,7 @@
 
 #include "base/hashmap.hh"
 #include "mem/protocol/AccessType.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
 #include "mem/ruby/common/Address.hh"
 #include "mem/ruby/common/Global.hh"
 #include "mem/ruby/common/Histogram.hh"
index 8604d014feca92a33714aa9096dc7688c2ec6393..ed7c25c9d7e62a8f8047e9c13c76d6ad10ac76d6 100644 (file)
@@ -51,7 +51,7 @@
 
 #include "base/stl_helpers.hh"
 #include "base/str.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
 #include "mem/protocol/MachineType.hh"
 #include "mem/protocol/Protocol.hh"
 #include "mem/ruby/network/Network.hh"
@@ -535,7 +535,7 @@ Profiler::clearStats()
 }
 
 void
-Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
+Profiler::addAddressTraceSample(const RubyRequest& msg, NodeID id)
 {
     if (msg.getType() != RubyRequestType_IFETCH) {
         // Note: The following line should be commented out if you
index 8e3e7f54765a6a48f24947b7b85458b1558aa82e..352ba453f39456ce81f74421392af4610c255a72 100644 (file)
@@ -68,7 +68,7 @@
 #include "params/RubyProfiler.hh"
 #include "sim/sim_object.hh"
 
-class CacheMsg;
+class RubyRequest;
 class AddressProfiler;
 
 class Profiler : public SimObject, public Consumer
@@ -93,7 +93,7 @@ class Profiler : public SimObject, public Consumer
     AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
     AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
 
-    void addAddressTraceSample(const CacheMsg& msg, NodeID id);
+    void addAddressTraceSample(const RubyRequest& msg, NodeID id);
 
     void profileRequest(const std::string& requestStr);
     void profileSharing(const Address& addr, AccessType type,
index e8cff9459b90390186722786771f2d9d077f65ff..aa54ee53cd3b4728574a2e1f396cd267f51c0c9c 100644 (file)
@@ -26,7 +26,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
 #include "mem/ruby/recorder/TraceRecord.hh"
 #include "mem/ruby/system/Sequencer.hh"
 #include "mem/ruby/system/System.hh"
index 2d8c94ed600d9ce520e2281b7c5ee6a88e2697a7..2aae61d7bef971257f5d58c27e32b584f84beafb 100644 (file)
@@ -4,34 +4,17 @@
 
 using namespace std;
 
-ostream&
-operator<<(ostream& out, const RubyRequest& obj)
+void
+RubyRequest::print(ostream& out) const
 {
-    out << hex << "0x" << obj.paddr << " data: 0x" << flush;
-    for (int i = 0; i < obj.len; i++) {
-        out << (int)obj.data[i];
-    }
-    out << dec << " type: " << RubyRequestType_to_string(obj.type) << endl;
-    return out;
-}
-
-vector<string>
-tokenizeString(string str, string delims)
-{
-    vector<string> tokens;
-    char* pch;
-    char* tmp;
-    const char* c_delims = delims.c_str();
-    tmp = new char[str.length()+1];
-    strcpy(tmp, str.c_str());
-    pch = strtok(tmp, c_delims);
-    while (pch != NULL) {
-        string tmp_str(pch);
-        if (tmp_str == "null") tmp_str = "";
-        tokens.push_back(tmp_str);
-
-        pch = strtok(NULL, c_delims);
-    }
-    delete [] tmp;
-    return tokens;
+  out << "[RubyRequest: ";
+  out << "LineAddress = " << m_LineAddress << " ";
+  out << "PhysicalAddress = " << m_PhysicalAddress << " ";
+  out << "Type = " << m_Type << " ";
+  out << "ProgramCounter = " << m_ProgramCounter << " ";
+  out << "AccessMode = " << m_AccessMode << " ";
+  out << "Size = " << m_Size << " ";
+  out << "Prefetch = " << m_Prefetch << " ";
+//  out << "Time = " << getTime() << " ";
+  out << "]";
 }
index d7acfd57831503bc138f0d977e1495570d6b4abe..06ca0de1cf3707f95793db80352d012e3de3c600 100644 (file)
 
 typedef void* RubyPortHandle;
 
-class RubyRequest
+class RubyRequest : public Message
 {
   public:
-    uint64_t paddr;
+    Address m_PhysicalAddress;
+    Address m_LineAddress;
+    RubyRequestType m_Type;
+    Address m_ProgramCounter;
+    RubyAccessMode m_AccessMode;
+    int m_Size;
+    PrefetchBit m_Prefetch;
     uint8_t* data;
-    int len;
-    uint64_t pc;
-    RubyRequestType type;
-    RubyAccessMode access_mode;
     PacketPtr pkt;
     unsigned proc_id;
 
     RubyRequest() {}
-    RubyRequest(uint64_t _paddr,
-                uint8_t* _data,
-                int _len,
-                uint64_t _pc,
-                RubyRequestType _type,
-                RubyAccessMode _access_mode,
-                PacketPtr _pkt,
-                unsigned _proc_id = 100)
-        : paddr(_paddr),
+    RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc,
+                RubyRequestType _type, RubyAccessMode _access_mode,
+                PacketPtr _pkt, PrefetchBit _pb = PrefetchBit_No,
+                 unsigned _proc_id = 100)
+        : m_PhysicalAddress(_paddr),
+          m_Type(_type),
+          m_ProgramCounter(_pc),
+          m_AccessMode(_access_mode),
+          m_Size(_len),
+          m_Prefetch(_pb),
           data(_data),
-          len(_len),
-          pc(_pc),
-          type(_type),
-          access_mode(_access_mode),
           pkt(_pkt),
           proc_id(_proc_id)
-    {}
+    {
+      m_LineAddress = m_PhysicalAddress;
+      m_LineAddress.makeLineAddress();
+    }
+
+    static RubyRequest*
+    create()
+    {
+        return new RubyRequest();
+    }
+
+    RubyRequest*
+    clone() const
+    {
+        return new RubyRequest(*this);
+    }
+
+    const Address&
+    getLineAddress() const
+    {
+        return m_LineAddress;
+    }
+
+    const Address&
+    getPhysicalAddress() const
+    {
+        return m_PhysicalAddress;
+    }
+
+    const RubyRequestType&
+    getType() const
+    {
+        return m_Type;
+    }
+
+    const Address&
+    getProgramCounter() const
+    {
+        return m_ProgramCounter;
+    }
+
+    const RubyAccessMode&
+    getAccessMode() const
+    {
+      return m_AccessMode;
+    }
+
+    const int&
+    getSize() const
+    {
+      return m_Size;
+    }
+
+    const PrefetchBit&
+    getPrefetch() const
+    {
+      return m_Prefetch;
+    }
 
     void print(std::ostream& out) const;
 };
 
-std::ostream& operator<<(std::ostream& out, const RubyRequest& obj);
+inline std::ostream&
+operator<<(std::ostream& out, const RubyRequest& obj)
+{
+  obj.print(out);
+  out << std::flush;
+  return out;
+}
 
 #endif
index cabba286a75259c21d55e9b902e1aedc46950d99..f23e15c9122ce20a56b4ed8767e6791d6a011898 100644 (file)
@@ -56,8 +56,8 @@ void profile_request(const std::string& L1CacheStateStr,
                      const std::string& L2CacheStateStr,
                      const std::string& directoryStateStr,
                      const std::string& requestTypeStr);
-void profile_miss(const CacheMsg& msg, NodeID id);
-void profile_L1Cache_miss(const CacheMsg& msg, NodeID id);
+void profile_miss(const RubyRequest& msg, NodeID id);
+void profile_L1Cache_miss(const RubyRequest& msg, NodeID id);
 void profile_token_retry(const Address& addr, AccessType type, int count);
 void profile_filter_action(int action);
 void profile_persistent_prediction(const Address& addr, AccessType type);
index bb23406b9d27f36a624acfcc4597d28552638d21..058a51bae23e0576618d33442be44956f2563a5b 100644 (file)
@@ -36,8 +36,6 @@
 #include <cassert>
 
 #include "mem/protocol/AccessType.hh"
-#include "mem/protocol/CacheMsg.hh"
-#include "mem/protocol/RubyRequestType.hh"
 #include "mem/protocol/Directory_State.hh"
 #include "mem/protocol/GenericRequestType.hh"
 #include "mem/protocol/L1Cache_State.hh"
index ea5054e4c58894d313ce461d400a89c18acfe089..192fa8c871fe943d89085b59d9ed0588d1120f2f 100644 (file)
@@ -344,7 +344,7 @@ CacheMemory::setMRU(const Address& address)
 }
 
 void
-CacheMemory::profileMiss(const CacheMsg& msg)
+CacheMemory::profileMiss(const RubyRequest& msg)
 {
     m_profiler_ptr->addCacheStatSample(msg.getType(), 
                                        msg.getAccessMode(),
index 4e7acd4ec935d22cd96986272808777677e141e1..197ac9f401d0d3a0cbe5cc106c31e8d25a18ac6e 100644 (file)
@@ -35,7 +35,7 @@
 
 #include "base/hashmap.hh"
 #include "mem/protocol/AccessPermission.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
 #include "mem/protocol/RubyRequestType.hh"
 #include "mem/protocol/GenericRequestType.hh"
 #include "mem/protocol/MachineType.hh"
@@ -107,7 +107,7 @@ class CacheMemory : public SimObject
     // Set this address to most recently used
     void setMRU(const Address& address);
 
-    void profileMiss(const CacheMsg & msg);
+    void profileMiss(const RubyRequest & msg);
 
     void profileGenericRequest(GenericRequestType requestType,
                                RubyAccessMode accessType,
index 772bc5142c5855fbaf0be79267508d4f7ee407c7..2889c0c57d0ccee099a109013d419a702edf9e0c 100644 (file)
@@ -53,11 +53,11 @@ DMASequencer::makeRequest(const RubyRequest &request)
         return RequestStatus_BufferFull;
     }
 
-    uint64_t paddr = request.paddr;
+    uint64_t paddr = request.m_PhysicalAddress.getAddress();
     uint8_t* data = request.data;
-    int len = request.len;
+    int len = request.m_Size;
     bool write = false;
-    switch(request.type) {
+    switch(request.m_Type) {
       case RubyRequestType_LD:
         write = false;
         break;
index c79154566764921b785d3c4e2e9ea59d7ae3dbf7..92627740f6c62932338a9773c751918cb5223c0c 100644 (file)
@@ -253,7 +253,7 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
                              pkt->getSize(), pc, type,
                              RubyAccessMode_Supervisor, pkt);
 
-    assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <=
+    assert(ruby_request.m_PhysicalAddress.getOffset() + ruby_request.m_Size <=
         RubySystem::getBlockSizeBytes());
 
     // Submit the ruby request
index a5f1a06fab1a15fa1d012105f703065048e0554a..7eb46e006b5aedfd52c11810a31844a6723dc241 100644 (file)
@@ -29,7 +29,6 @@
 #include "base/str.hh"
 #include "base/misc.hh"
 #include "cpu/testers/rubytest/RubyTester.hh"
-#include "mem/protocol/CacheMsg.hh"
 #include "mem/protocol/Protocol.hh"
 #include "mem/protocol/Protocol.hh"
 #include "mem/ruby/buffers/MessageBuffer.hh"
@@ -104,7 +103,7 @@ Sequencer::wakeup()
         panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
-             request->ruby_request.paddr, m_readRequestTable.size(),
+             request->ruby_request.m_PhysicalAddress, m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
     }
@@ -119,7 +118,7 @@ Sequencer::wakeup()
         panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
-             request->ruby_request.paddr, m_writeRequestTable.size(),
+             request->ruby_request.m_PhysicalAddress, m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
     }
@@ -227,15 +226,15 @@ Sequencer::insertRequest(SequencerRequest* request)
         schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
     }
 
-    Address line_addr(request->ruby_request.paddr);
+    Address line_addr(request->ruby_request.m_PhysicalAddress);
     line_addr.makeLineAddress();
-    if ((request->ruby_request.type == RubyRequestType_ST) ||
-        (request->ruby_request.type == RubyRequestType_RMW_Read) ||
-        (request->ruby_request.type == RubyRequestType_RMW_Write) ||
-        (request->ruby_request.type == RubyRequestType_Load_Linked) ||
-        (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
-        (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
-        (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
+    if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
+        (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+        (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+        (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+        (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+        (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+        (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
         pair<RequestTable::iterator, bool> r =
             m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
         bool success = r.second;
@@ -288,15 +287,15 @@ Sequencer::removeRequest(SequencerRequest* srequest)
            m_writeRequestTable.size() + m_readRequestTable.size());
 
     const RubyRequest & ruby_request = srequest->ruby_request;
-    Address line_addr(ruby_request.paddr);
+    Address line_addr(ruby_request.m_PhysicalAddress);
     line_addr.makeLineAddress();
-    if ((ruby_request.type == RubyRequestType_ST) ||
-        (ruby_request.type == RubyRequestType_RMW_Read) ||
-        (ruby_request.type == RubyRequestType_RMW_Write) ||
-        (ruby_request.type == RubyRequestType_Load_Linked) ||
-        (ruby_request.type == RubyRequestType_Store_Conditional) ||
-        (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
-        (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
+    if ((ruby_request.m_Type == RubyRequestType_ST) ||
+        (ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+        (ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+        (ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+        (ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+        (ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+        (ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
         m_writeRequestTable.erase(line_addr);
     } else {
         m_readRequestTable.erase(line_addr);
@@ -314,7 +313,7 @@ Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
     // longer locked.
     //
     bool success = true;
-    if (request->ruby_request.type == RubyRequestType_Store_Conditional) {
+    if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
         if (!m_dataCache_ptr->isLocked(address, m_version)) {
             //
             // For failed SC requests, indicate the failure to the cpu by
@@ -333,7 +332,7 @@ Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
         // Independent of success, all SC operations must clear the lock
         //
         m_dataCache_ptr->clearLocked(address);
-    } else if (request->ruby_request.type == RubyRequestType_Load_Linked) {
+    } else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
         //
         // Note: To fully follow Alpha LLSC semantics, should the LL clear any
         // previously locked cache lines?
@@ -380,13 +379,13 @@ Sequencer::writeCallback(const Address& address,
     m_writeRequestTable.erase(i);
     markRemoved();
 
-    assert((request->ruby_request.type == RubyRequestType_ST) ||
-           (request->ruby_request.type == RubyRequestType_RMW_Read) ||
-           (request->ruby_request.type == RubyRequestType_RMW_Write) ||
-           (request->ruby_request.type == RubyRequestType_Load_Linked) ||
-           (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
-           (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
-           (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));
+    assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
+           (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+           (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+           (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+           (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+           (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+           (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write));
 
     //
     // For Alpha, properly handle LL, SC, and write requests with respect to
@@ -398,9 +397,9 @@ Sequencer::writeCallback(const Address& address,
     if(!m_usingNetworkTester)
         success = handleLlsc(address, request);
 
-    if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
+    if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
         m_controller->blockOnQueue(address, m_mandatory_q_ptr);
-    } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
+    } else if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) {
         m_controller->unblock(address);
     }
 
@@ -440,8 +439,8 @@ Sequencer::readCallback(const Address& address,
     m_readRequestTable.erase(i);
     markRemoved();
 
-    assert((request->ruby_request.type == RubyRequestType_LD) ||
-           (request->ruby_request.type == RubyRequestType_IFETCH));
+    assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
+           (request->ruby_request.m_Type == RubyRequestType_IFETCH));
 
     hitCallback(request, mach, data, true, 
                 initialRequestTime, forwardRequestTime, firstResponseTime);
@@ -457,10 +456,10 @@ Sequencer::hitCallback(SequencerRequest* srequest,
                        Time firstResponseTime)
 {
     const RubyRequest & ruby_request = srequest->ruby_request;
-    Address request_address(ruby_request.paddr);
-    Address request_line_address(ruby_request.paddr);
+    Address request_address(ruby_request.m_PhysicalAddress);
+    Address request_line_address(ruby_request.m_PhysicalAddress);
     request_line_address.makeLineAddress();
-    RubyRequestType type = ruby_request.type;
+    RubyRequestType type = ruby_request.m_Type;
     Time issued_time = srequest->issue_time;
 
     // Set this cache entry to the most recently used
@@ -498,7 +497,7 @@ Sequencer::hitCallback(SequencerRequest* srequest,
         DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             g_eventQueue_ptr->getTime(), m_version, "Seq",
             success ? "Done" : "SC_Failed", "", "",
-            Address(ruby_request.paddr), miss_latency);
+            ruby_request.m_PhysicalAddress, miss_latency);
     }
 #if 0
     if (request.getPrefetch() == PrefetchBit_Yes) {
@@ -514,11 +513,11 @@ Sequencer::hitCallback(SequencerRequest* srequest,
             (type == RubyRequestType_Locked_RMW_Read) ||
             (type == RubyRequestType_Load_Linked)) {
             memcpy(ruby_request.data,
-                   data.getData(request_address.getOffset(), ruby_request.len),
-                   ruby_request.len);
+                   data.getData(request_address.getOffset(), ruby_request.m_Size),
+                   ruby_request.m_Size);
         } else {
             data.setData(ruby_request.data, request_address.getOffset(),
-                         ruby_request.len);
+                         ruby_request.m_Size);
         }
     } else {
         DPRINTF(MemoryAccess,
@@ -548,21 +547,21 @@ RequestStatus
 Sequencer::getRequestStatus(const RubyRequest& request)
 {
     bool is_outstanding_store =
-        !!m_writeRequestTable.count(line_address(Address(request.paddr)));
+        !!m_writeRequestTable.count(line_address(request.m_PhysicalAddress));
     bool is_outstanding_load =
-        !!m_readRequestTable.count(line_address(Address(request.paddr)));
+        !!m_readRequestTable.count(line_address(request.m_PhysicalAddress));
     if (is_outstanding_store) {
-        if ((request.type == RubyRequestType_LD) ||
-            (request.type == RubyRequestType_IFETCH) ||
-            (request.type == RubyRequestType_RMW_Read)) {
+        if ((request.m_Type == RubyRequestType_LD) ||
+            (request.m_Type == RubyRequestType_IFETCH) ||
+            (request.m_Type == RubyRequestType_RMW_Read)) {
             m_store_waiting_on_load_cycles++;
         } else {
             m_store_waiting_on_store_cycles++;
         }
         return RequestStatus_Aliased;
     } else if (is_outstanding_load) {
-        if ((request.type == RubyRequestType_ST) ||
-            (request.type == RubyRequestType_RMW_Write)) {
+        if ((request.m_Type == RubyRequestType_ST) ||
+            (request.m_Type == RubyRequestType_RMW_Write)) {
             m_load_waiting_on_store_cycles++;
         } else {
             m_load_waiting_on_load_cycles++;
@@ -586,7 +585,7 @@ Sequencer::empty() const
 RequestStatus
 Sequencer::makeRequest(const RubyRequest &request)
 {
-    assert(Address(request.paddr).getOffset() + request.len <=
+    assert(request.m_PhysicalAddress.getOffset() + request.m_Size <=
            RubySystem::getBlockSizeBytes());
     RequestStatus status = getRequestStatus(request);
     if (status != RequestStatus_Ready)
@@ -610,11 +609,10 @@ Sequencer::makeRequest(const RubyRequest &request)
 void
 Sequencer::issueRequest(const RubyRequest& request)
 {
-    // TODO: get rid of CacheMsg, RubyRequestType, and
-    // AccessModeTYpe, & have SLICC use RubyRequest and subtypes
-    // natively
+    // TODO: Eliminate RubyRequest being copied again.
+
     RubyRequestType ctype;
-    switch(request.type) {
+    switch(request.m_Type) {
       case RubyRequestType_IFETCH:
         ctype = RubyRequestType_IFETCH;
         break;
@@ -651,7 +649,7 @@ Sequencer::issueRequest(const RubyRequest& request)
     }
 
     RubyAccessMode amtype;
-    switch(request.access_mode){
+    switch(request.m_AccessMode){
       case RubyAccessMode_User:
         amtype = RubyAccessMode_User;
         break;
@@ -665,19 +663,21 @@ Sequencer::issueRequest(const RubyRequest& request)
         assert(0);
     }
 
-    Address line_addr(request.paddr);
+    Address line_addr(request.m_PhysicalAddress);
     line_addr.makeLineAddress();
-    CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype,
-        Address(request.pc), amtype, request.len, PrefetchBit_No,
-        request.proc_id);
+    RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
+                                       request.data, request.m_Size,
+                                       request.m_ProgramCounter.getAddress(),
+                                       ctype, amtype, request.pkt,
+                                       PrefetchBit_No, request.proc_id);
 
     DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
         g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
-        Address(request.paddr), RubyRequestType_to_string(request.type));
+        request.m_PhysicalAddress, RubyRequestType_to_string(request.m_Type));
 
     Time latency = 0;  // initialzed to an null value
 
-    if (request.type == RubyRequestType_IFETCH)
+    if (request.m_Type == RubyRequestType_IFETCH)
         latency = m_instCache_ptr->getLatency();
     else
         latency = m_dataCache_ptr->getLatency();