}
// Mandatory Queue between the node's CPU and its L1 caches
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
}
action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
}
action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:UPGRADE;
}
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
if (is_invalid(cache_entry) &&
}
action(p_profileMiss, "p", desc="Profile cache miss") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
cacheMemory.profileMiss(in_msg);
}
}
// Nothing from the unblock network
// Mandatory Queue between the node's CPU and its L1 caches
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
}
action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
// profile_miss(in_msg);
}
}
}
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
TBE tbe := L1_TBEs[in_msg.LineAddress];
L1_TBEs.allocate(address);
set_tbe(L1_TBEs[address]);
tbe.IssueCount := 0;
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
tbe.PC := in_msg.ProgramCounter;
tbe.AccessType := cache_request_type_to_access_type(in_msg.Type);
if (in_msg.Type == RubyRequestType:ATOMIC) {
}
action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
}
}
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.profileMiss(in_msg);
} else {
}
action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall the mandatory queue and wait on the address.") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
}
stall_and_wait(mandatoryQueue_in, address);
// Nothing from the request network
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
TBE tbe := TBEs[in_msg.LineAddress];
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
cache_entry.DataBlk);
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
if (L1IcacheMemory.isTagPresent(address)) {
L1IcacheMemory.profileMiss(in_msg);
} else if (L1DcacheMemory.isTagPresent(address)) {
out_port(responseNetwork_out, RequestMsg, responseFromCache);
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
getCacheEntry(in_msg.LineAddress),
L2_HW, desc="This is an L2 hardware prefetch";
}
-// CacheMsg
-structure(CacheMsg, desc="...", interface="Message") {
- Address LineAddress, desc="Line address for this request";
- Address PhysicalAddress, desc="Physical address for this request";
- RubyRequestType Type, desc="Type of request (LD, ST, etc)";
- Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- int Size, desc="size in bytes of access";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
-}
-
// SequencerMsg
structure(SequencerMsg, desc="...", interface="Message") {
Address LineAddress, desc="Line address for this request";
void profileMemoryCLBsize(int size, int numStaleI);
// used by 2level exclusive cache protocols
-void profile_miss(CacheMsg msg);
+void profile_miss(RubyRequest msg);
// used by non-fast path protocols
-void profile_L1Cache_miss(CacheMsg msg, NodeID l1cacheID);
+void profile_L1Cache_miss(RubyRequest msg, NodeID l1cacheID);
// used by CMP protocols
void profile_request(std::string L1CacheStateStr, std::string L2CacheStateStr,
void profileNack(Address, int, int, uint64);
}
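+// external="yes": SLICC does not generate this type; its definition is
+// supplied by the C++ RubyRequest class (updated below in this patch),
+// whose m_-prefixed members mirror these fields.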
+structure(RubyRequest, desc="...", interface="Message", external="yes") {
+ Address LineAddress, desc="Line address for this request";
+ Address PhysicalAddress, desc="Physical address for this request";
+ RubyRequestType Type, desc="Type of request (LD, ST, etc)";
+ Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ int Size, desc="size in bytes of access";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+}
+
external_type(AbstractEntry, primitive="yes");
structure (DirectoryMemory, external = "yes") {
void deallocate(Address);
AbstractCacheEntry lookup(Address);
bool isTagPresent(Address);
- void profileMiss(CacheMsg);
+ void profileMiss(RubyRequest);
void profileGenericRequest(GenericRequestType,
RubyAccessMode,
#include <vector>
#include "base/stl_helpers.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
#include "base/hashmap.hh"
#include "mem/protocol/AccessType.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"
#include "base/stl_helpers.hh"
#include "base/str.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/network/Network.hh"
}
void
-Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
+Profiler::addAddressTraceSample(const RubyRequest& msg, NodeID id)
{
if (msg.getType() != RubyRequestType_IFETCH) {
// Note: The following line should be commented out if you
#include "params/RubyProfiler.hh"
#include "sim/sim_object.hh"
-class CacheMsg;
+class RubyRequest;
class AddressProfiler;
class Profiler : public SimObject, public Consumer
AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
- void addAddressTraceSample(const CacheMsg& msg, NodeID id);
+ void addAddressTraceSample(const RubyRequest& msg, NodeID id);
void profileRequest(const std::string& requestStr);
void profileSharing(const Address& addr, AccessType type,
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/recorder/TraceRecord.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
using namespace std;
-ostream&
-operator<<(ostream& out, const RubyRequest& obj)
+void
+RubyRequest::print(ostream& out) const
{
- out << hex << "0x" << obj.paddr << " data: 0x" << flush;
- for (int i = 0; i < obj.len; i++) {
- out << (int)obj.data[i];
- }
- out << dec << " type: " << RubyRequestType_to_string(obj.type) << endl;
- return out;
-}
-
-vector<string>
-tokenizeString(string str, string delims)
-{
- vector<string> tokens;
- char* pch;
- char* tmp;
- const char* c_delims = delims.c_str();
- tmp = new char[str.length()+1];
- strcpy(tmp, str.c_str());
- pch = strtok(tmp, c_delims);
- while (pch != NULL) {
- string tmp_str(pch);
- if (tmp_str == "null") tmp_str = "";
- tokens.push_back(tmp_str);
-
- pch = strtok(NULL, c_delims);
- }
- delete [] tmp;
- return tokens;
+ out << "[RubyRequest: ";
+ out << "LineAddress = " << m_LineAddress << " ";
+ out << "PhysicalAddress = " << m_PhysicalAddress << " ";
+ out << "Type = " << m_Type << " ";
+ out << "ProgramCounter = " << m_ProgramCounter << " ";
+ out << "AccessMode = " << m_AccessMode << " ";
+ out << "Size = " << m_Size << " ";
+ out << "Prefetch = " << m_Prefetch << " ";
+// out << "Time = " << getTime() << " ";
+ out << "]";
}
typedef void* RubyPortHandle;
-class RubyRequest
+class RubyRequest : public Message
{
public:
- uint64_t paddr;
+ Address m_PhysicalAddress;
+ Address m_LineAddress;
+ RubyRequestType m_Type;
+ Address m_ProgramCounter;
+ RubyAccessMode m_AccessMode;
+ int m_Size;
+ PrefetchBit m_Prefetch;
uint8_t* data;
- int len;
- uint64_t pc;
- RubyRequestType type;
- RubyAccessMode access_mode;
PacketPtr pkt;
unsigned proc_id;
RubyRequest() {}
- RubyRequest(uint64_t _paddr,
- uint8_t* _data,
- int _len,
- uint64_t _pc,
- RubyRequestType _type,
- RubyAccessMode _access_mode,
- PacketPtr _pkt,
- unsigned _proc_id = 100)
- : paddr(_paddr),
+ RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc,
+ RubyRequestType _type, RubyAccessMode _access_mode,
+ PacketPtr _pkt, PrefetchBit _pb = PrefetchBit_No,
+ unsigned _proc_id = 100)
+ : m_PhysicalAddress(_paddr),
+ m_Type(_type),
+ m_ProgramCounter(_pc),
+ m_AccessMode(_access_mode),
+ m_Size(_len),
+ m_Prefetch(_pb),
data(_data),
- len(_len),
- pc(_pc),
- type(_type),
- access_mode(_access_mode),
pkt(_pkt),
proc_id(_proc_id)
- {}
+ {
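+ // Derive the block-aligned line address up front: makeLineAddress()
+ // masks off the block-offset bits of the physical address.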
+ m_LineAddress = m_PhysicalAddress;
+ m_LineAddress.makeLineAddress();
+ }
+
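+ // Factory and copy hooks so the message machinery can construct and
+ // duplicate RubyRequests through the Message interface.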
+ static RubyRequest*
+ create()
+ {
+ return new RubyRequest();
+ }
+
+ RubyRequest*
+ clone() const
+ {
+ return new RubyRequest(*this);
+ }
+
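+ // Read-only accessors; C++ consumers such as CacheMemory::profileMiss()
+ // and Profiler::addAddressTraceSample() read the request through these.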
+ const Address&
+ getLineAddress() const
+ {
+ return m_LineAddress;
+ }
+
+ const Address&
+ getPhysicalAddress() const
+ {
+ return m_PhysicalAddress;
+ }
+
+ const RubyRequestType&
+ getType() const
+ {
+ return m_Type;
+ }
+
+ const Address&
+ getProgramCounter() const
+ {
+ return m_ProgramCounter;
+ }
+
+ const RubyAccessMode&
+ getAccessMode() const
+ {
+ return m_AccessMode;
+ }
+
+ const int&
+ getSize() const
+ {
+ return m_Size;
+ }
+
+ const PrefetchBit&
+ getPrefetch() const
+ {
+ return m_Prefetch;
+ }
void print(std::ostream& out) const;
};
-std::ostream& operator<<(std::ostream& out, const RubyRequest& obj);
+inline std::ostream&
+operator<<(std::ostream& out, const RubyRequest& obj)
+{
+ obj.print(out);
+ out << std::flush;
+ return out;
+}
#endif
const std::string& L2CacheStateStr,
const std::string& directoryStateStr,
const std::string& requestTypeStr);
-void profile_miss(const CacheMsg& msg, NodeID id);
-void profile_L1Cache_miss(const CacheMsg& msg, NodeID id);
+void profile_miss(const RubyRequest& msg, NodeID id);
+void profile_L1Cache_miss(const RubyRequest& msg, NodeID id);
void profile_token_retry(const Address& addr, AccessType type, int count);
void profile_filter_action(int action);
void profile_persistent_prediction(const Address& addr, AccessType type);
#include <cassert>
#include "mem/protocol/AccessType.hh"
-#include "mem/protocol/CacheMsg.hh"
-#include "mem/protocol/RubyRequestType.hh"
#include "mem/protocol/Directory_State.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/L1Cache_State.hh"
}
void
-CacheMemory::profileMiss(const CacheMsg& msg)
+CacheMemory::profileMiss(const RubyRequest& msg)
{
m_profiler_ptr->addCacheStatSample(msg.getType(),
msg.getAccessMode(),
#include "base/hashmap.hh"
#include "mem/protocol/AccessPermission.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/protocol/RubyRequestType.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/MachineType.hh"
// Set this address to most recently used
void setMRU(const Address& address);
- void profileMiss(const CacheMsg & msg);
+ void profileMiss(const RubyRequest & msg);
void profileGenericRequest(GenericRequestType requestType,
RubyAccessMode accessType,
return RequestStatus_BufferFull;
}
- uint64_t paddr = request.paddr;
+ uint64_t paddr = request.m_PhysicalAddress.getAddress();
uint8_t* data = request.data;
- int len = request.len;
+ int len = request.m_Size;
bool write = false;
- switch(request.type) {
+ switch (request.m_Type) {
case RubyRequestType_LD:
write = false;
break;
pkt->getSize(), pc, type,
RubyAccessMode_Supervisor, pkt);
- assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <=
+ assert(ruby_request.m_PhysicalAddress.getOffset() + ruby_request.m_Size <=
RubySystem::getBlockSizeBytes());
// Submit the ruby request
#include "base/str.hh"
#include "base/misc.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
-#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
panic("Possible Deadlock detected. Aborting!\n"
"version: %d request.paddr: 0x%x m_readRequestTable: %d "
"current time: %u issue_time: %d difference: %d\n", m_version,
- request->ruby_request.paddr, m_readRequestTable.size(),
+ request->ruby_request.m_PhysicalAddress, m_readRequestTable.size(),
current_time, request->issue_time,
current_time - request->issue_time);
}
panic("Possible Deadlock detected. Aborting!\n"
"version: %d request.paddr: 0x%x m_writeRequestTable: %d "
"current time: %u issue_time: %d difference: %d\n", m_version,
- request->ruby_request.paddr, m_writeRequestTable.size(),
+ request->ruby_request.m_PhysicalAddress, m_writeRequestTable.size(),
current_time, request->issue_time,
current_time - request->issue_time);
}
schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
}
- Address line_addr(request->ruby_request.paddr);
+ Address line_addr(request->ruby_request.m_PhysicalAddress);
line_addr.makeLineAddress();
- if ((request->ruby_request.type == RubyRequestType_ST) ||
- (request->ruby_request.type == RubyRequestType_RMW_Read) ||
- (request->ruby_request.type == RubyRequestType_RMW_Write) ||
- (request->ruby_request.type == RubyRequestType_Load_Linked) ||
- (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
- (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
- (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
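+ // Store-like requests (stores, RMWs, LL/SC, and locked ops) are tracked
+ // in the write table; loads and ifetches go in the read table.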
+ if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
+ (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+ (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+ (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+ (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
pair<RequestTable::iterator, bool> r =
m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
bool success = r.second;
m_writeRequestTable.size() + m_readRequestTable.size());
const RubyRequest & ruby_request = srequest->ruby_request;
- Address line_addr(ruby_request.paddr);
+ Address line_addr(ruby_request.m_PhysicalAddress);
line_addr.makeLineAddress();
- if ((ruby_request.type == RubyRequestType_ST) ||
- (ruby_request.type == RubyRequestType_RMW_Read) ||
- (ruby_request.type == RubyRequestType_RMW_Write) ||
- (ruby_request.type == RubyRequestType_Load_Linked) ||
- (ruby_request.type == RubyRequestType_Store_Conditional) ||
- (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
- (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
+ if ((ruby_request.m_Type == RubyRequestType_ST) ||
+ (ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+ (ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+ (ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+ (ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+ (ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+ (ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
m_writeRequestTable.erase(line_addr);
} else {
m_readRequestTable.erase(line_addr);
// longer locked.
//
bool success = true;
- if (request->ruby_request.type == RubyRequestType_Store_Conditional) {
+ if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
if (!m_dataCache_ptr->isLocked(address, m_version)) {
//
// For failed SC requests, indicate the failure to the cpu by
// Independent of success, all SC operations must clear the lock
//
m_dataCache_ptr->clearLocked(address);
- } else if (request->ruby_request.type == RubyRequestType_Load_Linked) {
+ } else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
//
// Note: To fully follow Alpha LLSC semantics, should the LL clear any
// previously locked cache lines?
m_writeRequestTable.erase(i);
markRemoved();
- assert((request->ruby_request.type == RubyRequestType_ST) ||
- (request->ruby_request.type == RubyRequestType_RMW_Read) ||
- (request->ruby_request.type == RubyRequestType_RMW_Write) ||
- (request->ruby_request.type == RubyRequestType_Load_Linked) ||
- (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
- (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
- (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));
+ assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
+ (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+ (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+ (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+ (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write));
//
// For Alpha, properly handle LL, SC, and write requests with respect to
if (!m_usingNetworkTester)
success = handleLlsc(address, request);
- if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
+ if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
- } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
+ } else if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) {
m_controller->unblock(address);
}
m_readRequestTable.erase(i);
markRemoved();
- assert((request->ruby_request.type == RubyRequestType_LD) ||
- (request->ruby_request.type == RubyRequestType_IFETCH));
+ assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
+ (request->ruby_request.m_Type == RubyRequestType_IFETCH));
hitCallback(request, mach, data, true,
initialRequestTime, forwardRequestTime, firstResponseTime);
Time firstResponseTime)
{
const RubyRequest & ruby_request = srequest->ruby_request;
- Address request_address(ruby_request.paddr);
- Address request_line_address(ruby_request.paddr);
+ Address request_address(ruby_request.m_PhysicalAddress);
+ Address request_line_address(ruby_request.m_PhysicalAddress);
request_line_address.makeLineAddress();
- RubyRequestType type = ruby_request.type;
+ RubyRequestType type = ruby_request.m_Type;
Time issued_time = srequest->issue_time;
// Set this cache entry to the most recently used
DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
g_eventQueue_ptr->getTime(), m_version, "Seq",
success ? "Done" : "SC_Failed", "", "",
- Address(ruby_request.paddr), miss_latency);
+ ruby_request.m_PhysicalAddress, miss_latency);
}
#if 0
if (request.getPrefetch() == PrefetchBit_Yes) {
(type == RubyRequestType_Locked_RMW_Read) ||
(type == RubyRequestType_Load_Linked)) {
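+ // Load-like requests copy from the cache block into the request's data
+ // buffer; store-like requests copy the buffer into the block.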
memcpy(ruby_request.data,
- data.getData(request_address.getOffset(), ruby_request.len),
- ruby_request.len);
+ data.getData(request_address.getOffset(), ruby_request.m_Size),
+ ruby_request.m_Size);
} else {
data.setData(ruby_request.data, request_address.getOffset(),
- ruby_request.len);
+ ruby_request.m_Size);
}
} else {
DPRINTF(MemoryAccess,
Sequencer::getRequestStatus(const RubyRequest& request)
{
bool is_outstanding_store =
- !!m_writeRequestTable.count(line_address(Address(request.paddr)));
+ !!m_writeRequestTable.count(line_address(request.m_PhysicalAddress));
bool is_outstanding_load =
- !!m_readRequestTable.count(line_address(Address(request.paddr)));
+ !!m_readRequestTable.count(line_address(request.m_PhysicalAddress));
if (is_outstanding_store) {
- if ((request.type == RubyRequestType_LD) ||
- (request.type == RubyRequestType_IFETCH) ||
- (request.type == RubyRequestType_RMW_Read)) {
+ if ((request.m_Type == RubyRequestType_LD) ||
+ (request.m_Type == RubyRequestType_IFETCH) ||
+ (request.m_Type == RubyRequestType_RMW_Read)) {
m_store_waiting_on_load_cycles++;
} else {
m_store_waiting_on_store_cycles++;
}
return RequestStatus_Aliased;
} else if (is_outstanding_load) {
- if ((request.type == RubyRequestType_ST) ||
- (request.type == RubyRequestType_RMW_Write)) {
+ if ((request.m_Type == RubyRequestType_ST) ||
+ (request.m_Type == RubyRequestType_RMW_Write)) {
m_load_waiting_on_store_cycles++;
} else {
m_load_waiting_on_load_cycles++;
RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
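+ // A request must fit within a single cache line.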
- assert(Address(request.paddr).getOffset() + request.len <=
+ assert(request.m_PhysicalAddress.getOffset() + request.m_Size <=
RubySystem::getBlockSizeBytes());
RequestStatus status = getRequestStatus(request);
if (status != RequestStatus_Ready)
void
Sequencer::issueRequest(const RubyRequest& request)
{
- // TODO: get rid of CacheMsg, RubyRequestType, and
- // AccessModeTYpe, & have SLICC use RubyRequest and subtypes
- // natively
+ // TODO: Eliminate RubyRequest being copied again.
+
RubyRequestType ctype;
- switch(request.type) {
+ switch (request.m_Type) {
case RubyRequestType_IFETCH:
ctype = RubyRequestType_IFETCH;
break;
}
RubyAccessMode amtype;
- switch(request.access_mode){
+ switch (request.m_AccessMode) {
case RubyAccessMode_User:
amtype = RubyAccessMode_User;
break;
assert(0);
}
- Address line_addr(request.paddr);
+ Address line_addr(request.m_PhysicalAddress);
line_addr.makeLineAddress();
- CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype,
- Address(request.pc), amtype, request.len, PrefetchBit_No,
- request.proc_id);
+ RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
+ request.data, request.m_Size,
+ request.m_ProgramCounter.getAddress(),
+ ctype, amtype, request.pkt,
+ PrefetchBit_No, request.proc_id);
DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
- Address(request.paddr), RubyRequestType_to_string(request.type));
+ request.m_PhysicalAddress, RubyRequestType_to_string(request.m_Type));
Time latency = 0; // initialized to a null value
- if (request.type == RubyRequestType_IFETCH)
+ if (request.m_Type == RubyRequestType_IFETCH)
latency = m_instCache_ptr->getLatency();
else
latency = m_dataCache_ptr->getLatency();