/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id$
- *
- */
+machine(MachineType:L2Cache, "Token protocol")
+ : CacheMemory * L2cache;
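+   // N_tokens: size of the batch of plain tokens sent with shared data when
+   // this bank holds a surplus (see d_sendDataWithTokens).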
+ int N_tokens;
+ Cycles l2_request_latency := 5;
+ Cycles l2_response_latency := 5;
+ bool filtering_enabled := "True";
+
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+
+ // this L2 bank -> a local L1 || mod-directory
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
+ vnet_type="response";
+ // this L2 bank -> mod-directory
+ MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
+ vnet_type="request";
+ // this L2 bank -> a local L1
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
+ vnet_type="request";
+
+
+ // FROM the network to this local bank of L2 cache
+
+ // a local L1 || mod-directory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
+ vnet_type="response";
+ MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
+ vnet_type="persistent";
+ // mod-directory -> this L2 bank
+ MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
+ vnet_type="request";
+ // a local L1 -> this L2 bank
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
+ vnet_type="request";
-machine(L2Cache, "Token protocol")
- : CacheMemory * L2cacheMemory,
- int N_tokens,
- int l2_request_latency = 10,
- int l2_response_latency = 10,
- bool filtering_enabled = true
{
-
- // L2 BANK QUEUES
- // From local bank of L2 cache TO the network
-
- // this L2 bank -> a local L1 || mod-directory
- MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false";
- // this L2 bank -> mod-directory
- MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false";
- // this L2 bank -> a local L1
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false";
-
-
- // FROM the network to this local bank of L2 cache
-
- // a local L1 || mod-directory -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false";
- MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true";
- // mod-directory -> this L2 bank
- MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false";
- // a local L1 -> this L2 bank
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false";
-
// STATES
- enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
+ state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
// Base states
- NP, desc="Not Present";
- I, desc="Idle";
- S, desc="Shared, not present in any local L1s";
- O, desc="Owned, not present in any L1s";
- M, desc="Modified, not present in any L1s";
+ NP, AccessPermission:Invalid, desc="Not Present";
+ I, AccessPermission:Invalid, desc="Idle";
+ S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
+ O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
+ M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
// Locked states
- I_L, "I^L", desc="Invalid, Locked";
- S_L, "S^L", desc="Shared, Locked";
+ I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
+ S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
}
// EVENTS
// Lock/Unlock
Persistent_GETX, desc="Another processor has priority to read/write";
Persistent_GETS, desc="Another processor has priority to read";
+    Persistent_GETS_Last_Token, desc="Another processor has priority to read, and we hold the last token";
Own_Lock_or_Unlock, desc="This processor now has priority";
}
DataBlock DataBlk, desc="data for the block";
}
- structure(DirEntry, desc="...") {
+ structure(DirEntry, desc="...", interface="AbstractEntry") {
Set Sharers, desc="Set of the internal processors that want the block in shared state";
bool exclusive, default="false", desc="if local exclusive is likely";
}
- external_type(PerfectCacheMemory) {
- void allocate(Address);
- void deallocate(Address);
- DirEntry lookup(Address);
- bool isTagPresent(Address);
+ structure(PerfectCacheMemory, external="yes") {
+ void allocate(Addr);
+ void deallocate(Addr);
+ DirEntry lookup(Addr);
+ bool isTagPresent(Addr);
}
- external_type(PersistentTable) {
- void persistentRequestLock(Address, MachineID, AccessType);
- void persistentRequestUnlock(Address, MachineID);
- MachineID findSmallest(Address);
- AccessType typeOfSmallest(Address);
- void markEntries(Address);
- bool isLocked(Address);
- int countStarvingForAddress(Address);
- int countReadStarvingForAddress(Address);
+ structure(PersistentTable, external="yes") {
+ void persistentRequestLock(Addr, MachineID, AccessType);
+ void persistentRequestUnlock(Addr, MachineID);
+ MachineID findSmallest(Addr);
+ AccessType typeOfSmallest(Addr);
+ void markEntries(Addr);
+ bool isLocked(Addr);
+ int countStarvingForAddress(Addr);
+ int countReadStarvingForAddress(Addr);
}
PersistentTable persistentTable;
- PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
+ PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
- Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
- if (L2cacheMemory.isTagPresent(addr)) {
- return static_cast(Entry, L2cacheMemory[addr]);
- }
- assert(false);
- return static_cast(Entry, L2cacheMemory[addr]);
+ Tick clockEdge();
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
+ return cache_entry;
}
- int getTokens(Address addr) {
- if (L2cacheMemory.isTagPresent(addr)) {
- return getL2CacheEntry(addr).Tokens;
- } else {
- return 0;
- }
+ DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
+ return localDirectory.lookup(address);
}
- void changePermission(Address addr, AccessPermission permission) {
- if (L2cacheMemory.isTagPresent(addr)) {
- return L2cacheMemory.changePermission(addr, permission);
- }
+ void functionalRead(Addr addr, Packet *pkt) {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
}
- bool isCacheTagPresent(Address addr) {
- return (L2cacheMemory.isTagPresent(addr) );
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
}
- State getState(Address addr) {
- if (isCacheTagPresent(addr)) {
- return getL2CacheEntry(addr).CacheState;
- } else if (persistentTable.isLocked(addr) == true) {
- return State:I_L;
+ int getTokens(Entry cache_entry) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.Tokens;
} else {
- return State:NP;
+ return 0;
}
}
- string getStateStr(Address addr) {
- return L2Cache_State_to_string(getState(addr));
+ State getState(Entry cache_entry, Addr addr) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ } else if (persistentTable.isLocked(addr)) {
+ return State:I_L;
+ } else {
+ return State:NP;
+ }
}
- void setState(Address addr, State state) {
-
+ void setState(Entry cache_entry, Addr addr, State state) {
- if (isCacheTagPresent(addr)) {
+ if (is_valid(cache_entry)) {
// Make sure the token count is in range
- assert(getL2CacheEntry(addr).Tokens >= 0);
- assert(getL2CacheEntry(addr).Tokens <= max_tokens());
+ assert(cache_entry.Tokens >= 0);
+ assert(cache_entry.Tokens <= max_tokens());
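+      // The owner token is weighted (max_tokens() / 2) + 1, so a count of
+      // exactly half of max_tokens() is impossible.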
+ assert(cache_entry.Tokens != (max_tokens() / 2));
// Make sure we have no tokens in L
if ((state == State:I_L) ) {
- if (isCacheTagPresent(addr)) {
- assert(getL2CacheEntry(addr).Tokens == 0);
- }
+ assert(cache_entry.Tokens == 0);
}
// in M and E you have all the tokens
if (state == State:M ) {
- assert(getL2CacheEntry(addr).Tokens == max_tokens());
+ assert(cache_entry.Tokens == max_tokens());
}
// in NP you have no tokens
if (state == State:NP) {
- assert(getL2CacheEntry(addr).Tokens == 0);
+ assert(cache_entry.Tokens == 0);
}
// You have at least one token in S-like states
if (state == State:S ) {
- assert(getL2CacheEntry(addr).Tokens > 0);
+ assert(cache_entry.Tokens > 0);
}
-      // You have at least half the token in O-like states
+      // You have more than half the tokens in O-like states
if (state == State:O ) {
- assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
- // assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+ assert(cache_entry.Tokens > (max_tokens() / 2));
}
- getL2CacheEntry(addr).CacheState := state;
+ cache_entry.CacheState := state;
+ }
+ }
- // Set permission
- if (state == State:I) {
- changePermission(addr, AccessPermission:Invalid);
- } else if (state == State:S || state == State:O ) {
- changePermission(addr, AccessPermission:Read_Only);
- } else if (state == State:M ) {
- changePermission(addr, AccessPermission:Read_Write);
- } else {
- changePermission(addr, AccessPermission:Invalid);
- }
+ AccessPermission getAccessPermission(Addr addr) {
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return L2Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L2Cache_State_to_permission(state));
}
}
- void removeSharer(Address addr, NodeID id) {
+ void removeSharer(Addr addr, NodeID id) {
if (localDirectory.isTagPresent(addr)) {
- localDirectory[addr].Sharers.remove(id);
- if (localDirectory[addr].Sharers.count() == 0) {
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.remove(id);
+ if (dir_entry.Sharers.count() == 0) {
localDirectory.deallocate(addr);
}
}
}
- bool sharersExist(Address addr) {
+ bool sharersExist(Addr addr) {
if (localDirectory.isTagPresent(addr)) {
- if (localDirectory[addr].Sharers.count() > 0) {
+ DirEntry dir_entry := getDirEntry(addr);
+ if (dir_entry.Sharers.count() > 0) {
return true;
}
else {
}
}
- bool exclusiveExists(Address addr) {
+ bool exclusiveExists(Addr addr) {
if (localDirectory.isTagPresent(addr)) {
- if (localDirectory[addr].exclusive == true) {
+ DirEntry dir_entry := getDirEntry(addr);
+ if (dir_entry.exclusive) {
return true;
}
else {
}
// assumes that caller will check to make sure tag is present
- Set getSharers(Address addr) {
- return localDirectory[addr].Sharers;
+ Set getSharers(Addr addr) {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.Sharers;
}
- void setNewWriter(Address addr, NodeID id) {
+ void setNewWriter(Addr addr, NodeID id) {
if (localDirectory.isTagPresent(addr) == false) {
localDirectory.allocate(addr);
}
- localDirectory[addr].Sharers.clear();
- localDirectory[addr].Sharers.add(id);
- localDirectory[addr].exclusive := true;
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.clear();
+ dir_entry.Sharers.add(id);
+ dir_entry.exclusive := true;
}
- void addNewSharer(Address addr, NodeID id) {
+ void addNewSharer(Addr addr, NodeID id) {
if (localDirectory.isTagPresent(addr) == false) {
localDirectory.allocate(addr);
}
- localDirectory[addr].Sharers.add(id);
- // localDirectory[addr].exclusive := false;
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.add(id);
+ // dir_entry.exclusive := false;
}
- void clearExclusiveBitIfExists(Address addr) {
- if (localDirectory.isTagPresent(addr) == true) {
- localDirectory[addr].exclusive := false;
+ void clearExclusiveBitIfExists(Addr addr) {
+ if (localDirectory.isTagPresent(addr)) {
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.exclusive := false;
}
}
// Persistent Network
in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
- persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
+ persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
- persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
+ persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
- persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
+ persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
} else {
error("Unexpected message");
}
+ Entry cache_entry := getCacheEntry(in_msg.addr);
// React to the message based on the current state of the table
- if (persistentTable.isLocked(in_msg.Address)) {
-
- if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
- trigger(Event:Persistent_GETS, in_msg.Address);
+ if (persistentTable.isLocked(in_msg.addr)) {
+
+ if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
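+          // Down to the last token if we hold a single plain token, or only
+          // the bare owner token (counted as (max_tokens() / 2) + 1).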
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
+ cache_entry);
+ } else {
+ trigger(Event:Persistent_GETS, in_msg.addr, cache_entry);
+ }
} else {
- trigger(Event:Persistent_GETX, in_msg.Address);
+ trigger(Event:Persistent_GETX, in_msg.addr, cache_entry);
}
}
else {
- trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+ trigger(Event:Own_Lock_or_Unlock, in_msg.addr, cache_entry);
}
}
}
// Request Network
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:Transient_GETX, in_msg.Address);
+ trigger(Event:Transient_GETX, in_msg.addr, cache_entry);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
- trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
+ if (getTokens(cache_entry) == 1) {
+ trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
+ cache_entry);
}
else {
- trigger(Event:Transient_GETS, in_msg.Address);
+ trigger(Event:Transient_GETS, in_msg.addr, cache_entry);
}
} else {
error("Unexpected message");
}
in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
- if (L1requestNetwork_in.isReady()) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:L1_GETX, in_msg.Address);
+ trigger(Event:L1_GETX, in_msg.addr, cache_entry);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
- trigger(Event:L1_GETS_Last_Token, in_msg.Address);
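+          // Same last-token test: one plain token, or only the bare owner token.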
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:L1_GETS_Last_Token, in_msg.addr, cache_entry);
}
else {
- trigger(Event:L1_GETS, in_msg.Address);
+ trigger(Event:L1_GETS, in_msg.addr, cache_entry);
}
} else {
error("Unexpected message");
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
- if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+
+ if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, in_msg.Address);
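+          // A plain ACK never carries the owner token.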
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Ack, in_msg.addr, cache_entry);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
- trigger(Event:Data_Owner, in_msg.Address);
+ trigger(Event:Data_Owner, in_msg.addr, cache_entry);
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_Shared, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+ trigger(Event:Data_Shared, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
+ in_msg.Type == CoherenceResponseType:WB_OWNED ||
+ in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
- if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
+ if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
// either room is available or the block is already present
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Tokens, in_msg.Address);
+ trigger(Event:Writeback_Tokens, in_msg.addr, cache_entry);
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Shared_Data, in_msg.Address);
+ trigger(Event:Writeback_Shared_Data, in_msg.addr, cache_entry);
}
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
//assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Owned, in_msg.Address);
+ trigger(Event:Writeback_Owned, in_msg.addr, cache_entry);
}
}
else {
- trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L2_Replacement,
+ L2cache.cacheProbe(in_msg.addr),
+ getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
}
} else if (in_msg.Type == CoherenceResponseType:INV) {
- trigger(Event:L1_INV, in_msg.Address);
+ trigger(Event:L1_INV, in_msg.addr, cache_entry);
} else {
error("Unexpected message");
}
} else {
if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack_All_Tokens, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_All_Tokens, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
- if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
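+          // As above, a plain ACK never carries the owner token.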
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
+ in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
+ in_msg.Type == CoherenceResponseType:WB_OWNED ||
+ in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+ if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
// either room is available or the block is already present
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
assert(in_msg.Dirty == false);
- assert( (getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I) );
- trigger(Event:Writeback_All_Tokens, in_msg.Address);
+ assert( (getState(cache_entry, in_msg.addr) != State:NP)
+ && (getState(cache_entry, in_msg.addr) != State:I) );
+ trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
assert(in_msg.Dirty == false);
- trigger(Event:Writeback_All_Tokens, in_msg.Address);
+ trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
}
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
- trigger(Event:Writeback_All_Tokens, in_msg.Address);
+ trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
}
}
else {
- trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L2_Replacement,
+ L2cache.cacheProbe(in_msg.addr),
+ getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
}
} else if (in_msg.Type == CoherenceResponseType:INV) {
- trigger(Event:L1_INV, in_msg.Address);
+ trigger(Event:L1_INV, in_msg.addr, cache_entry);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Unexpected message");
}
}
peek(L1requestNetwork_in, RequestMsg) {
// if this is a retry or no local sharers, broadcast normally
-
- // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
- enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
- out_msg.Address := in_msg.Address;
+ enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
+ out_msg.addr := in_msg.addr;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := in_msg.RequestorMachine;
out_msg.RetryNum := in_msg.RetryNum;
//
- // If a statically shared L2 cache, then no other L2 caches can
+      // If the L2 cache is statically shared, then no other L2 caches can
// store the block
//
//out_msg.Destination.broadcast(MachineType:L2Cache);
//out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
//out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
- out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
out_msg.MessageSize := MessageSizeType:Request_Control;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Prefetch := in_msg.Prefetch;
action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
peek(responseNetwork_in, ResponseMsg) {
// FIXME, should use a 3rd vnet
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
out_msg.Tokens := in_msg.Tokens;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := in_msg.DataBlk;
}
action(c_cleanReplacement, "c", desc="Issue clean writeback") {
- if (getL2CacheEntry(address).Tokens > 0) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Tokens := cache_entry.Tokens;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
}
action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
- if (getL2CacheEntry(address).Dirty) {
+ if (cache_entry.Dirty) {
out_msg.MessageSize := MessageSizeType:Writeback_Data;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
} else {
out_msg.Type := CoherenceResponseType:ACK_OWNER;
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
- if (getL2CacheEntry(address).Tokens > N_tokens) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
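+      // Hand out a batch of N_tokens only if more than half of max_tokens()
+      // remains afterwards (we stay owner); otherwise send a single token.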
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := N_tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens;
+ cache_entry.Tokens := cache_entry.Tokens - N_tokens;
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := 1;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+ cache_entry.Tokens := cache_entry.Tokens - 1;
}
}
}
action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
+ assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(in_msg.Requestor);
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
- if (getL2CacheEntry(address).Tokens > 0) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getL2CacheEntry(address).Tokens > 0);
- if (getL2CacheEntry(address).Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ if (cache_entry.Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens - 1;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
- getL2CacheEntry(address).Tokens := 1;
+ cache_entry.Tokens := 1;
}
-  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
+  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all but one of our tokens to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getL2CacheEntry(address).Tokens > 0);
- if (getL2CacheEntry(address).Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
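+    // We must hold the owner token plus at least one plain token, so that
+    // one token can stay behind.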
+ assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.Tokens := cache_entry.Tokens - 1;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := 1;
+ }
+
+  action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(is_valid(cache_entry));
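+    // Only the bare owner token remains, so data and all tokens go to the starver.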
+ assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- getL2CacheEntry(address).Tokens := 1;
}
+ cache_entry.Tokens := 0;
}
// assert(persistentTable.isLocked(address));
peek(responseNetwork_in, ResponseMsg) {
// FIXME, should use a 3rd vnet in some cases
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.Tokens := in_msg.Tokens;
out_msg.DataBlk := in_msg.DataBlk;
//assert(persistentTable.isLocked(address));
peek(responseNetwork_in, ResponseMsg) {
// FIXME, should use a 3rd vnet in some cases
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
out_msg.Type := CoherenceResponseType:DATA_SHARED;
} else {
+ assert(in_msg.Tokens < (max_tokens() / 2));
out_msg.Type := CoherenceResponseType:ACK;
}
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.Tokens := in_msg.Tokens;
out_msg.DataBlk := in_msg.DataBlk;
// assert(persistentTable.isLocked(address));
peek(responseNetwork_in, ResponseMsg) {
// FIXME, should use a 3rd vnet in some cases
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.Tokens := in_msg.Tokens;
out_msg.DataBlk := in_msg.DataBlk;
action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
peek(responseNetwork_in, ResponseMsg) {
- removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
+ removeSharer(in_msg.addr, machineIDToNodeID(in_msg.Sender));
}
}
action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
peek(requestNetwork_in, RequestMsg) {
- if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
+ if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.addr) == false) {
//profile_filter_action(1);
- DEBUG_EXPR("filtered message");
- DEBUG_EXPR(in_msg.RetryNum);
+ DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
+ in_msg.RetryNum);
}
else {
- enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
- out_msg.Address := in_msg.Address;
+ enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
+ out_msg.addr := in_msg.addr;
out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := in_msg.RequestorMachine;
-
+
//
// Currently assuming only one chip so all L1s are local
//
out_msg.Type := in_msg.Type;
out_msg.isLocal := false;
- out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Prefetch := in_msg.Prefetch;
}
}
}
-
action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- assert(getL2CacheEntry(address).Tokens > 0);
- //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
out_msg.Tokens := 1;
}
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+ cache_entry.Tokens := cache_entry.Tokens - 1;
}
}
-  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
+  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner token from cache to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- assert(getL2CacheEntry(address).Tokens > 0);
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
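+      // Only the bare owner token is held; ownership moves to the requesting L1.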
+ assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
- out_msg.Tokens := 1;
+ out_msg.Tokens := cache_entry.Tokens;
}
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+ cache_entry.Tokens := 0;
}
}
-  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
+  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
-// assert(getL2CacheEntry(address).Tokens == max_tokens());
- //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ assert(is_valid(cache_entry));
+// assert(cache_entry.Tokens == max_tokens());
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
//out_msg.Tokens := max_tokens();
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ out_msg.Tokens := cache_entry.Tokens;
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
}
action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue();
+ L1requestNetwork_in.dequeue(clockEdge());
}
action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
assert(in_msg.Tokens != 0);
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;
+ cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
// this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
// may not trigger this action.
if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
- getL2CacheEntry(address).Dirty := true;
+ cache_entry.Dirty := true;
}
}
}
peek(L1requestNetwork_in, RequestMsg) {
if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
if (in_msg.Type == CoherenceRequestType:GETX) {
- setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
+ setNewWriter(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
} else if (in_msg.Type == CoherenceRequestType:GETS) {
- addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
+ addNewSharer(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
}
}
}
action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
peek(L1requestNetwork_in, RequestMsg) {
if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
- (isCacheTagPresent(address))) {
- L2cacheMemory.setMRU(address);
+ (is_valid(cache_entry))) {
+ L2cache.setMRU(address);
}
}
}
action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
- if (getL2CacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(in_msg.Requestor);
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
- if (getL2CacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
- out_msg.Address := address;
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(in_msg.Requestor);
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(u_writeDataToCache, "u", desc="Write data to cache") {
peek(responseNetwork_in, ResponseMsg) {
- getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
- if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
- getL2CacheEntry(address).Dirty := in_msg.Dirty;
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ if ((cache_entry.Dirty == false) && in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
}
}
}
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
- L2cacheMemory.allocate(address, new Entry);
+ set_cache_entry(L2cache.allocate(address, new Entry));
}
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- L2cacheMemory.deallocate(address);
+ L2cache.deallocate(address);
+ unset_cache_entry();
}
- //action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- // peek(L1requestNetwork_in, RequestMsg) {
- // AccessModeType not implemented
- //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
- // }
- //}
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cache.demand_misses;
+ }
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cache.demand_hits;
+ }
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
peek(responseNetwork_in, ResponseMsg) {
- assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
+ if (in_msg.Type != CoherenceResponseType:ACK &&
+ in_msg.Type != CoherenceResponseType:WB_TOKENS) {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.DataBlk == in_msg.DataBlk);
+ }
}
}
transition(NP, {L1_GETS, L1_GETX}) {
a_broadcastLocalRequest;
r_markNewSharer;
- //uu_profileMiss;
+ uu_profileMiss;
o_popL1RequestQueue;
}
}
- transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
+ transition(NP,
+ {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
+ I_L) {
l_popPersistentQueue;
}
a_broadcastLocalRequest;
tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
r_markNewSharer;
- //uu_profileMiss;
+ uu_profileMiss;
o_popL1RequestQueue;
}
a_broadcastLocalRequest;
tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
r_markNewSharer;
- //uu_profileMiss;
+ uu_profileMiss;
o_popL1RequestQueue;
}
m_popRequestQueue;
}
- transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
+ transition(I,
+ {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
+ I_L) {
e_sendAckWithCollectedTokens;
l_popPersistentQueue;
}
}
- transition(S, Persistent_GETS, S_L) {
+ transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
f_sendAckWithAllButOneTokens;
l_popPersistentQueue;
}
tt_sendLocalAckWithCollectedTokens;
r_markNewSharer;
r_setMRU;
- //uu_profileMiss;
+ uu_profileMiss;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
l_popPersistentQueue;
}
+ transition(O, Persistent_GETS_Last_Token, I_L) {
+ fa_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
transition(O, Transient_GETS) {
// send multiple tokens
r_clearExclusive;
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataOwnerFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataAndAllTokensFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
- //uu_profileMiss;
+ uu_profileMiss;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataAndAllTokensFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
transition(I_L, {L1_GETX, L1_GETS}) {
a_broadcastLocalRequest;
r_markNewSharer;
- //uu_profileMiss;
+ uu_profileMiss;
o_popL1RequestQueue;
}
tt_sendLocalAckWithCollectedTokens;
r_markNewSharer;
r_setMRU;
- //uu_profileMiss;
+ uu_profileMiss;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
l_popPersistentQueue;
}
- transition(S_L, Persistent_GETS) {
+ transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
l_popPersistentQueue;
}