*
*/
-machine(L1Cache, "Token protocol") {
+machine(L1Cache, "Token protocol")
+ : Sequencer * sequencer,
+ CacheMemory * L1IcacheMemory,
+ CacheMemory * L1DcacheMemory,
+ int l2_select_num_bits,
+ int N_tokens,
+ int l1_request_latency = 2,
+ int l1_response_latency = 2,
+ int retry_threshold = 1,
+ int fixed_timeout_latency = 100,
+ bool dynamic_timeout_enabled = true,
+ bool no_mig_atomic = true
+{
// From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
+
// a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false";
- MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true";
+ MessageBuffer responseFromL1Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
+ MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer requestFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
+
// To this node's L1 cache FROM the network
// an L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false";
+ MessageBuffer responseToL1Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
+ MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
// an L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false";
- MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true";
+ MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
// STATES
- enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+ state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
- NP, "NP", desc="Not Present";
- I, "I", desc="Idle";
- S, "S", desc="Shared";
- O, "O", desc="Owned";
- M, "M", desc="Modified (dirty)";
- MM, "MM", desc="Modified (dirty and locally modified)";
- M_W, "M^W", desc="Modified (dirty), waiting";
- MM_W, "MM^W", desc="Modified (dirty and locally modified), waiting";
+ NP, AccessPermission:Invalid, "NP", desc="Not Present";
+ I, AccessPermission:Invalid, "I", desc="Idle";
+ S, AccessPermission:Read_Only, "S", desc="Shared";
+ O, AccessPermission:Read_Only, "O", desc="Owned";
+ M, AccessPermission:Read_Only, "M", desc="Modified (dirty)";
+ MM, AccessPermission:Read_Write, "MM", desc="Modified (dirty and locally modified)";
+ M_W, AccessPermission:Read_Only, "M^W", desc="Modified (dirty), waiting";
+ MM_W, AccessPermission:Read_Write, "MM^W", desc="Modified (dirty and locally modified), waiting";
// Transient States
- IM, "IM", desc="Issued GetX";
- SM, "SM", desc="Issued GetX, we still have an old copy of the line";
- OM, "OM", desc="Issued GetX, received data";
- IS, "IS", desc="Issued GetS";
+ IM, AccessPermission:Busy, "IM", desc="Issued GetX";
+ SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
+ OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
+ IS, AccessPermission:Busy, "IS", desc="Issued GetS";
// Locked states
- I_L, "I^L", desc="Invalid, Locked";
- S_L, "S^L", desc="Shared, Locked";
- IM_L, "IM^L", desc="Invalid, Locked, trying to go to Modified";
- SM_L, "SM^L", desc="Shared, Locked, trying to go to Modified";
- IS_L, "IS^L", desc="Invalid, Locked, trying to go to Shared";
+ I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
+ S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
+ IM_L, AccessPermission:Busy, "IM^L", desc="Invalid, Locked, trying to go to Modified";
+ SM_L, AccessPermission:Busy, "SM^L", desc="Shared, Locked, trying to go to Modified";
+ IS_L, AccessPermission:Busy, "IS^L", desc="Invalid, Locked, trying to go to Shared";
}
// EVENTS
enumeration(Event, desc="Cache events") {
Load, desc="Load request from the processor";
Ifetch, desc="I-fetch request from the processor";
Store, desc="Store request from the processor";
+ Atomic, desc="Atomic request from the processor";
L1_Replacement, desc="L1 Replacement";
// Responses
Data_Shared, desc="Received a data message, we are now a sharer";
Data_Owner, desc="Received a data message, we are now the owner";
Ack, desc="Received an ack message";
Data_All_Tokens, desc="Received the data, we have all the tokens";
Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
// Requests
Transient_GETX, desc="A GetX from another processor";
Transient_Local_GETX, desc="A GetX from another processor";
Transient_GETS, desc="A GetS from another processor";
Transient_Local_GETS, desc="A GetS from another processor";
Transient_GETS_Last_Token, desc="A GetS from another processor";
Transient_Local_GETS_Last_Token, desc="A GetS from another processor";
// Lock/Unlock for distributed
Persistent_GETX, desc="Another processor has priority to read/write";
Persistent_GETS, desc="Another processor has priority to read";
+ Persistent_GETS_Last_Token, desc="Another processor has priority to read, no more tokens";
Own_Lock_or_Unlock, desc="This processor now has priority";
// Triggers
Request_Timeout, desc="Timeout";
Use_TimeoutStarverX, desc="Timeout";
Use_TimeoutStarverS, desc="Timeout";
Use_TimeoutNoStarvers, desc="Timeout";
-
+ Use_TimeoutNoStarvers_NoMig, desc="Timeout Don't Migrate";
}
// TYPES
- int getRetryThreshold();
- int getFixedTimeoutLatency();
- bool getDynamicTimeoutEnabled();
-
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
State CacheState, desc="cache state";
bool Dirty, desc="Is the data dirty (different than memory)?";
int Tokens, desc="The number of tokens we're holding for the line";
DataBlock DataBlk, desc="data for the block";
}
// TBE fields
structure(TBE, desc="...") {
Address Address, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
Address PC, desc="Program counter of request";
bool WentPersistent, default="false", desc="Request went persistent";
bool ExternalResponse, default="false", desc="Response came from an external controller";
+ bool IsAtomic, default="false", desc="Request was an atomic request";
AccessType AccessType, desc="Type of request (used for profiling)";
Time IssueTime, desc="Time the request was issued";
- AccessModeType AccessMode, desc="user/supervisor access type";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
- external_type(CacheMemory) {
- bool cacheAvail(Address);
- Address cacheProbe(Address);
- void allocate(Address);
- void deallocate(Address);
- Entry lookup(Address);
- void changePermission(Address, AccessPermission);
- bool isTagPresent(Address);
- }
-
- external_type(TBETable) {
+ structure(TBETable, external="yes") {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
+ structure(PersistentTable, external="yes") {
+ void persistentRequestLock(Address, MachineID, AccessType);
+ void persistentRequestUnlock(Address, MachineID);
+ bool okToIssueStarving(Address, MachineID);
+ MachineID findSmallest(Address);
+ AccessType typeOfSmallest(Address);
+ void markEntries(Address);
+ bool isLocked(Address);
+ int countStarvingForAddress(Address);
+ int countReadStarvingForAddress(Address);
+ }
+
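+ // Hooks implemented by the generated controller code: they bind the
+ // implicit cache_entry and tbe variables used inside actions, and wake up
+ // message buffers that were stalled on an address.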
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Address a);
TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
- CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
- CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
- Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
bool starving, default="false";
+ int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
- PersistentTable persistentTable, constructor_hack="i";
+ PersistentTable persistentTable;
TimerTable useTimerTable;
TimerTable reissueTimerTable;
int outstandingPersistentRequests, default="0";
int averageLatencyHysteresis, default="(8)"; // Constant that provides hysteresis for calculating the estimated average latency
- int averageLatencyCounter, default="(500 << (*m_L1Cache_averageLatencyHysteresis_vec[i]))";
+ int averageLatencyCounter, default="(500 << (*m_L1Cache_averageLatencyHysteresis_ptr))";
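+ // The counter stores the average latency scaled up by 2^averageLatencyHysteresis;
+ // averageLatencyEstimate() shifts right by the hysteresis factor to recover it.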
int averageLatencyEstimate() {
- DEBUG_EXPR( (averageLatencyCounter >> averageLatencyHysteresis) );
- profile_average_latency_estimate( (averageLatencyCounter >> averageLatencyHysteresis) );
+ DPRINTF(RubySlicc, "%d\n",
+ (averageLatencyCounter >> averageLatencyHysteresis));
+ //profile_average_latency_estimate( (averageLatencyCounter >> averageLatencyHysteresis) );
return averageLatencyCounter >> averageLatencyHysteresis;
}
void updateAverageLatencyEstimate(int latency) {
- DEBUG_EXPR( latency );
+ DPRINTF(RubySlicc, "%d\n", latency);
assert(latency >= 0);
// By subtracting the current average and then adding the most
// recent sample, we calculate an estimate of the recent average.
averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
}
-
- Entry getCacheEntry(Address addr), return_by_ref="yes" {
- if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory[addr];
- } else {
- return L1IcacheMemory[addr];
+ Entry getCacheEntry(Address addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
+ if(is_valid(L1Dcache_entry)) {
+ return L1Dcache_entry;
}
+
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
+ return L1Icache_entry;
}
- int getTokens(Address addr) {
- if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory[addr].Tokens;
- } else if (L1IcacheMemory.isTagPresent(addr)) {
- return L1IcacheMemory[addr].Tokens;
- } else {
- return 0;
- }
+ DataBlock getDataBlock(Address addr), return_by_ref="yes" {
+ return getCacheEntry(addr).DataBlk;
}
- void changePermission(Address addr, AccessPermission permission) {
- if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory.changePermission(addr, permission);
- } else {
- return L1IcacheMemory.changePermission(addr, permission);
- }
+ Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
+ return L1Dcache_entry;
}
- bool isCacheTagPresent(Address addr) {
- return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+ Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
+ return L1Icache_entry;
}
- State getState(Address addr) {
- assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+ int getTokens(Entry cache_entry) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.Tokens;
+ }
+ return 0;
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Address addr) {
- if (L1_TBEs.isPresent(addr)) {
- return L1_TBEs[addr].TBEState;
- } else if (isCacheTagPresent(addr)) {
- return getCacheEntry(addr).CacheState;
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
} else {
if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) {
// Not in cache, in persistent table, but this processor isn't highest priority
return State:I_L;
} else {
return State:NP;
}
}
}
- void setState(Address addr, State state) {
+ void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
- if (L1_TBEs.isPresent(addr)) {
+ if (is_valid(tbe)) {
assert(state != State:I);
assert(state != State:S);
assert(state != State:O);
assert(state != State:MM);
assert(state != State:M);
- L1_TBEs[addr].TBEState := state;
+ tbe.TBEState := state;
}
- if (isCacheTagPresent(addr)) {
+ if (is_valid(cache_entry)) {
// Make sure the token count is in range
- assert(getCacheEntry(addr).Tokens >= 0);
- assert(getCacheEntry(addr).Tokens <= max_tokens());
+ assert(cache_entry.Tokens >= 0);
+ assert(cache_entry.Tokens <= max_tokens());
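+ // The owner's token count is biased upward by max_tokens() / 2 in this
+ // protocol version, so no cache should ever hold exactly half the tokens.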
+ assert(cache_entry.Tokens != (max_tokens() / 2));
if ((state == State:I_L) ||
(state == State:IM_L) ||
(state == State:IS_L)) {
// Make sure we have no tokens in the "Invalid, locked" states
- if (isCacheTagPresent(addr)) {
- assert(getCacheEntry(addr).Tokens == 0);
- }
+ assert(cache_entry.Tokens == 0);
// Make sure the line is locked
// assert(persistentTable.isLocked(addr));
} else if ((state == State:S_L) ||
(state == State:SM_L)) {
- assert(getCacheEntry(addr).Tokens >= 1);
+ assert(cache_entry.Tokens >= 1);
+ assert(cache_entry.Tokens < (max_tokens() / 2));
// Make sure the line is locked...
// assert(persistentTable.isLocked(addr));
}
// in M and E you have all the tokens
if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
- assert(getCacheEntry(addr).Tokens == max_tokens());
+ assert(cache_entry.Tokens == max_tokens());
}
// in NP you have no tokens
if (state == State:NP) {
- assert(getCacheEntry(addr).Tokens == 0);
+ assert(cache_entry.Tokens == 0);
}
// You have at least one token in S-like states
if (state == State:S || state == State:SM) {
- assert(getCacheEntry(addr).Tokens > 0);
+ assert(cache_entry.Tokens > 0);
}
// You have at least half the token in O-like states
- if (state == State:O && state == State:OM) {
+ if (state == State:O || state == State:OM) {
- assert(getCacheEntry(addr).Tokens >= 1); // Must have at least one token
- assert(getCacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+ assert(cache_entry.Tokens > (max_tokens() / 2));
}
- getCacheEntry(addr).CacheState := state;
-
- // Set permission
- if (state == State:MM ||
- state == State:MM_W) {
- changePermission(addr, AccessPermission:Read_Write);
- } else if ((state == State:S) ||
- (state == State:O) ||
- (state == State:M) ||
- (state == State:M_W) ||
- (state == State:SM) ||
- (state == State:S_L) ||
- (state == State:SM_L) ||
- (state == State:OM)) {
- changePermission(addr, AccessPermission:Read_Only);
- } else {
- changePermission(addr, AccessPermission:Invalid);
- }
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Address addr) {
+ TBE tbe := L1_TBEs[addr];
+ if(is_valid(tbe)) {
+ return L1Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return L1Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Address addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L1Cache_State_to_permission(state));
}
}
- Event mandatory_request_type_to_event(CacheRequestType type) {
- if (type == CacheRequestType:LD) {
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
return Event:Load;
- } else if (type == CacheRequestType:IFETCH) {
+ } else if (type == RubyRequestType:IFETCH) {
return Event:Ifetch;
- } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+ } else if (type == RubyRequestType:ST) {
return Event:Store;
+ } else if (type == RubyRequestType:ATOMIC) {
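+ // With no_mig_atomic, atomics raise their own event so the protocol can
+ // avoid migrating the block away while the atomic completes.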
+ if (no_mig_atomic) {
+ return Event:Atomic;
+ } else {
+ return Event:Store;
+ }
} else {
- error("Invalid CacheRequestType");
+ error("Invalid RubyRequestType");
}
}
- AccessType cache_request_type_to_access_type(CacheRequestType type) {
- if ((type == CacheRequestType:LD) || (type == CacheRequestType:IFETCH)) {
+ AccessType cache_request_type_to_access_type(RubyRequestType type) {
+ if ((type == RubyRequestType:LD) || (type == RubyRequestType:IFETCH)) {
return AccessType:Read;
- } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+ } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
return AccessType:Write;
} else {
- error("Invalid CacheRequestType");
+ error("Invalid RubyRequestType");
}
}
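+ //
+ // Classify which machine serviced a miss, for latency profiling; the _wCC
+ // variants appear to mark responses that involved a cache-to-cache transfer.
+ //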
GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
if (machineIDToMachineType(sender) == MachineType:L1Cache) {
- return GenericMachineType:L1Cache_wCC; // NOTE direct L1 hits should not call this
+ //
+ // NOTE direct local hits should not call this
+ //
+ return GenericMachineType:L1Cache_wCC;
} else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
- if ( sender == (map_L1CacheMachId_to_L2Cache(addr,machineID))) {
+
+ if (sender == (mapAddressToRange(addr,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits))) {
+
return GenericMachineType:L2Cache;
} else {
return GenericMachineType:L2Cache_wCC;
}
}
- bool okToIssueStarving(Address addr) {
- return persistentTable.okToIssueStarving(addr);
+ bool okToIssueStarving(Address addr, MachineID machineID) {
+ return persistentTable.okToIssueStarving(addr, machineID);
}
void markPersistentEntries(Address addr) {
persistentTable.markEntries(addr);
}
- MessageBuffer triggerQueue, ordered="false", random="false";
+ void setExternalResponse(TBE tbe) {
+ assert(is_valid(tbe));
+ tbe.ExternalResponse := true;
+ }
+
+ bool IsAtomic(TBE tbe) {
+ assert(is_valid(tbe));
+ return tbe.IsAtomic;
+ }
// ** OUT_PORTS **
out_port(persistentNetwork_out, PersistentMsg, persistentFromL1Cache);
// ** IN_PORTS **
// Use Timer
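+ // The rank argument sets the order in which this controller's in_ports are
+ // polled, highest rank first; the mandatory queue (rank 0) is checked last.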
- in_port(useTimerTable_in, Address, useTimerTable) {
+ in_port(useTimerTable_in, Address, useTimerTable, rank=5) {
if (useTimerTable_in.isReady()) {
- if (persistentTable.isLocked(useTimerTable.readyAddress()) && (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
+ TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
+
+ if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
+ (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
- trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress());
+ trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress(),
+ getCacheEntry(useTimerTable.readyAddress()), tbe);
+ } else {
+ trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress(),
+ getCacheEntry(useTimerTable.readyAddress()), tbe);
}
- else {
- trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress());
+ } else {
+ if (no_mig_atomic && IsAtomic(tbe)) {
+ trigger(Event:Use_TimeoutNoStarvers_NoMig, useTimerTable.readyAddress(),
+ getCacheEntry(useTimerTable.readyAddress()), tbe);
+ } else {
+ trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress(),
+ getCacheEntry(useTimerTable.readyAddress()), tbe);
}
}
- else {
- trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress());
- }
}
}
// Reissue Timer
- in_port(reissueTimerTable_in, Address, reissueTimerTable) {
+ in_port(reissueTimerTable_in, Address, reissueTimerTable, rank=4) {
if (reissueTimerTable_in.isReady()) {
- trigger(Event:Request_Timeout, reissueTimerTable.readyAddress());
+ trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
+ getCacheEntry(reissueTimerTable.readyAddress()),
+ L1_TBEs[reissueTimerTable.readyAddress()]);
}
}
-
-
// Persistent Network
- in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache) {
+ in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
if (persistentNetwork_in.isReady()) {
- peek(persistentNetwork_in, PersistentMsg) {
+ peek(persistentNetwork_in, PersistentMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
// Apply the lockdown or unlockdown message to the table
if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
} else {
error("Unexpected message");
}
// React to the message based on the current state of the table
+ Entry cache_entry := getCacheEntry(in_msg.Address);
+ TBE tbe := L1_TBEs[in_msg.Address];
+
if (persistentTable.isLocked(in_msg.Address)) {
if (persistentTable.findSmallest(in_msg.Address) == machineID) {
// Our Own Lock - this processor is highest priority
- trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+ trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
+ cache_entry, tbe);
} else {
if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
- trigger(Event:Persistent_GETS, in_msg.Address);
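+ // Holding one plain token, or the owner's biased count of
+ // (max_tokens() / 2) + 1, means this is our last token for the block.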
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:Persistent_GETS, in_msg.Address,
+ cache_entry, tbe);
+ }
} else {
- trigger(Event:Persistent_GETX, in_msg.Address);
+ trigger(Event:Persistent_GETX, in_msg.Address,
+ cache_entry, tbe);
}
}
} else {
// Unlock case - no entries in the table
- trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
- }
- }
- }
- }
-
-
- // Request Network
- in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
- if (requestNetwork_in.isReady()) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceRequestType:GETX) {
- if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETX, in_msg.Address);
- }
- else {
- trigger(Event:Transient_GETX, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- if ( (L1DcacheMemory.isTagPresent(in_msg.Address) || L1IcacheMemory.isTagPresent(in_msg.Address)) && getCacheEntry(in_msg.Address).Tokens == 1) {
- if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address);
- }
- else {
- trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
- }
- }
- else {
- if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETS, in_msg.Address);
- }
- else {
- trigger(Event:Transient_GETS, in_msg.Address);
- }
- }
- } else {
- error("Unexpected message");
+ trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
+ cache_entry, tbe);
}
}
}
}
// Response Network
- in_port(responseNetwork_in, ResponseMsg, responseToL1Cache) {
+ in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
if (responseNetwork_in.isReady()) {
- peek(responseNetwork_in, ResponseMsg) {
+ peek(responseNetwork_in, ResponseMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.Address);
+ TBE tbe := L1_TBEs[in_msg.Address];
+
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
- if ( in_msg.SenderMachine == MachineType:L2Cache ) {
+ if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
+
+ if (in_msg.Sender == mapAddressToRange(in_msg.Address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits)) {
- if (in_msg.Sender == map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID)) {
// came from an off-chip L2 cache
- if (L1_TBEs.isPresent(in_msg.Address)) {
+ if (is_valid(tbe)) {
// L1_TBEs[in_msg.Address].ExternalResponse := true;
// profile_offchipL2_response(in_msg.Address);
}
else {
// profile_onchipL2_response(in_msg.Address );
}
- } else if ( in_msg.SenderMachine == MachineType:Directory ) {
- if (L1_TBEs.isPresent(in_msg.Address)) {
- L1_TBEs[in_msg.Address].ExternalResponse := true;
+ } else if ( machineIDToMachineType(in_msg.Sender) == MachineType:Directory ) {
+ if (is_valid(tbe)) {
+ setExternalResponse(tbe);
// profile_memory_response( in_msg.Address);
}
- } else if ( in_msg.SenderMachine == MachineType:L1Cache) {
- if (isLocalProcessor(machineID, in_msg.Sender) == false) {
- if (L1_TBEs.isPresent(in_msg.Address)) {
- // L1_TBEs[in_msg.Address].ExternalResponse := true;
+ } else if ( machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+ //if (isLocalProcessor(machineID, in_msg.Sender) == false) {
+ //if (is_valid(tbe)) {
+ // tbe.ExternalResponse := true;
// profile_offchipL1_response(in_msg.Address );
- }
- }
- else {
+ //}
+ //}
+ //else {
// profile_onchipL1_response(in_msg.Address );
- }
+ //}
} else {
error("unexpected SenderMachine");
}
- if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
+ if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, in_msg.Address);
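+ // A plain ACK never carries the owner token, so its token count must
+ // stay below max_tokens() / 2.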
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
- trigger(Event:Data_Owner, in_msg.Address);
+ trigger(Event:Data_Owner, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_Shared, in_msg.Address);
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Data_Shared, in_msg.Address, cache_entry, tbe);
} else {
error("Unexpected message");
}
} else {
if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack_All_Tokens, in_msg.Address);
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_All_Tokens, in_msg.Address);
+ trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry, tbe);
} else {
error("Unexpected message");
}
}
}
+ // Request Network
+ in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg, block_on="Address") {
+ assert(in_msg.Destination.isElement(machineID));
+
+ Entry cache_entry := getCacheEntry(in_msg.Address);
+ TBE tbe := L1_TBEs[in_msg.Address];
+
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ if (in_msg.isLocal) {
+ trigger(Event:Transient_Local_GETX, in_msg.Address,
+ cache_entry, tbe);
+ }
+ else {
+ trigger(Event:Transient_GETX, in_msg.Address,
+ cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ if (in_msg.isLocal) {
+ trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address,
+ cache_entry, tbe);
+ }
+ else {
+ trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
+ cache_entry, tbe);
+ }
+ }
+ else {
+ if (in_msg.isLocal) {
+ trigger(Event:Transient_Local_GETS, in_msg.Address,
+ cache_entry, tbe);
+ }
+ else {
+ trigger(Event:Transient_GETS, in_msg.Address,
+ cache_entry, tbe);
+ }
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
- if (in_msg.Type == CacheRequestType:IFETCH) {
- // ** INSTRUCTION ACCESS ***
+ TBE tbe := L1_TBEs[in_msg.LineAddress];
- // Check to see if it is in the OTHER L1
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
- // The block is in the wrong L1, try to write it to the L2
- trigger(Event:L1_Replacement, in_msg.Address);
- }
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry, tbe);
} else {
- if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+ // The block is in the wrong L1, try to write it to the L2
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Dcache_entry, tbe);
+ }
+
+ if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry, tbe);
} else {
// No room in the L1, so we need to make room
- trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L1_Replacement,
+ L1IcacheMemory.cacheProbe(in_msg.LineAddress),
+ getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
// *** DATA ACCESS ***
- // Check to see if it is in the OTHER L1
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
- // The block is in the wrong L1, try to write it to the L2
- trigger(Event:L1_Replacement, in_msg.Address);
- }
-
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry, tbe);
} else {
- if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+ // The block is in the wrong L1, try to write it to the L2
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Icache_entry, tbe);
+ }
+
+ if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry, tbe);
} else {
// No room in the L1, so we need to make room
- trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L1_Replacement,
+ L1DcacheMemory.cacheProbe(in_msg.LineAddress),
+ getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
}
}
}
// ACTIONS
action(a_issueReadRequest, "a", desc="Issue GETS") {
- if (L1_TBEs[address].IssueCount == 0) {
+ assert(is_valid(tbe));
+ if (tbe.IssueCount == 0) {
// Update outstanding requests
- profile_outstanding_request(outstandingRequests);
+ //profile_outstanding_request(outstandingRequests);
outstandingRequests := outstandingRequests + 1;
}
- if (L1_TBEs[address].IssueCount >= getRetryThreshold() ) {
+ if (tbe.IssueCount >= retry_threshold) {
// Issue a persistent request if possible
- if (okToIssueStarving(address) && (starving == false)) {
- enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
+ if (okToIssueStarving(address, machineID) && (starving == false)) {
+ enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
out_msg.Address := address;
out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
out_msg.Requestor := machineID;
out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, then only one
+ // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Persistent_Control;
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
markPersistentEntries(address);
starving := true;
- if (L1_TBEs[address].IssueCount == 0) {
- profile_persistent_prediction(address, L1_TBEs[address].AccessType);
+ if (tbe.IssueCount == 0) {
+ //profile_persistent_prediction(address, tbe.AccessType);
}
// Update outstanding requests
- profile_outstanding_persistent_request(outstandingPersistentRequests);
+ //profile_outstanding_persistent_request(outstandingPersistentRequests);
outstandingPersistentRequests := outstandingPersistentRequests + 1;
// Increment IssueCount
- L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+ tbe.IssueCount := tbe.IssueCount + 1;
- L1_TBEs[address].WentPersistent := true;
+ tbe.WentPersistent := true;
// Do not schedule a wakeup; a persistent request will always complete
}
}
} else {
// Make a normal request
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
- out_msg.RetryNum := L1_TBEs[address].IssueCount;
- if (L1_TBEs[address].IssueCount == 0) {
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
+ out_msg.RetryNum := tbe.IssueCount;
+ if (tbe.IssueCount == 0) {
out_msg.MessageSize := MessageSizeType:Request_Control;
} else {
out_msg.MessageSize := MessageSizeType:Reissue_Control;
}
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
// send to other local L1s, with local bit set
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
- out_msg.Destination := getOtherLocalL1IDs(machineID);
- out_msg.RetryNum := L1_TBEs[address].IssueCount;
+ //
+ // Since only one chip, assuming all L1 caches are local
+ //
+ //out_msg.Destination := getOtherLocalL1IDs(machineID);
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.remove(machineID);
+
+ out_msg.RetryNum := tbe.IssueCount;
out_msg.isLocal := true;
- if (L1_TBEs[address].IssueCount == 0) {
- out_msg.MessageSize := MessageSizeType:Request_Control;
+ if (tbe.IssueCount == 0) {
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
} else {
- out_msg.MessageSize := MessageSizeType:Reissue_Control;
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
}
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
// Increment IssueCount
- L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+ tbe.IssueCount := tbe.IssueCount + 1;
// Set a wakeup timer
- if (getDynamicTimeoutEnabled()) {
+ if (dynamic_timeout_enabled) {
reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
} else {
- reissueTimerTable.set(address, getFixedTimeoutLatency());
+ reissueTimerTable.set(address, fixed_timeout_latency);
}
}
action(b_issueWriteRequest, "b", desc="Issue GETX") {
- if (L1_TBEs[address].IssueCount == 0) {
+ assert(is_valid(tbe));
+ if (tbe.IssueCount == 0) {
// Update outstanding requests
- profile_outstanding_request(outstandingRequests);
+ //profile_outstanding_request(outstandingRequests);
outstandingRequests := outstandingRequests + 1;
}
- if (L1_TBEs[address].IssueCount >= getRetryThreshold() ) {
+ if (tbe.IssueCount >= retry_threshold) {
// Issue a persistent request if possible
- if ( okToIssueStarving(address) && (starving == false)) {
- enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
+ if ( okToIssueStarving(address, machineID) && (starving == false)) {
+ enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
out_msg.Address := address;
out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, then only one
+ // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Persistent_Control;
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
markPersistentEntries(address);
starving := true;
// Update outstanding requests
- profile_outstanding_persistent_request(outstandingPersistentRequests);
+ //profile_outstanding_persistent_request(outstandingPersistentRequests);
outstandingPersistentRequests := outstandingPersistentRequests + 1;
- if (L1_TBEs[address].IssueCount == 0) {
- profile_persistent_prediction(address, L1_TBEs[address].AccessType);
+ if (tbe.IssueCount == 0) {
+ //profile_persistent_prediction(address, tbe.AccessType);
}
// Increment IssueCount
- L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+ tbe.IssueCount := tbe.IssueCount + 1;
- L1_TBEs[address].WentPersistent := true;
+ tbe.WentPersistent := true;
// Do not schedule a wakeup; a persistent request will always complete
}
} else {
// Make a normal request
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
- out_msg.RetryNum := L1_TBEs[address].IssueCount;
- if (L1_TBEs[address].IssueCount == 0) {
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
+ out_msg.RetryNum := tbe.IssueCount;
+
+ if (tbe.IssueCount == 0) {
out_msg.MessageSize := MessageSizeType:Request_Control;
} else {
out_msg.MessageSize := MessageSizeType:Reissue_Control;
}
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
// send to other local L1s too
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.isLocal := true;
- out_msg.Destination := getOtherLocalL1IDs(machineID);
- out_msg.RetryNum := L1_TBEs[address].IssueCount;
- if (L1_TBEs[address].IssueCount == 0) {
- out_msg.MessageSize := MessageSizeType:Request_Control;
+
+ //
+ // Since only one chip, assuming all L1 caches are local
+ //
+ //out_msg.Destination := getOtherLocalL1IDs(machineID);
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.remove(machineID);
+
+ out_msg.RetryNum := tbe.IssueCount;
+ if (tbe.IssueCount == 0) {
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
} else {
- out_msg.MessageSize := MessageSizeType:Reissue_Control;
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
}
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
// Increment IssueCount
- L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+ tbe.IssueCount := tbe.IssueCount + 1;
- DEBUG_EXPR("incremented issue count");
- DEBUG_EXPR(L1_TBEs[address].IssueCount);
+ DPRINTF(RubySlicc, "incremented issue count to %d\n",
+ tbe.IssueCount);
// Set a wakeup timer
- if (getDynamicTimeoutEnabled()) {
+ if (dynamic_timeout_enabled) {
reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
} else {
- reissueTimerTable.set(address, getFixedTimeoutLatency());
+ reissueTimerTable.set(address, fixed_timeout_latency);
}
}
}
action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
peek(responseNetwork_in, ResponseMsg) {
// FIXME, should use a 3rd vnet
- enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Tokens := in_msg.Tokens;
out_msg.MessageSize := in_msg.MessageSize;
}
action(c_ownedReplacement, "c", desc="Issue writeback") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ assert(is_valid(cache_entry));
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.Type := CoherenceResponseType:WB_OWNED;
// always send the data?
out_msg.MessageSize := MessageSizeType:Writeback_Data;
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
- action(cc_sharedReplacement, "\c", desc="Issue dirty writeback") {
+ action(cc_sharedReplacement, "\c", desc="Issue shared writeback") {
// don't send writeback if replacing block with no tokens
- if (getCacheEntry(address).Tokens != 0) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ assert(is_valid(cache_entry));
+ assert (cache_entry.Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // assert(getCacheEntry(address).Dirty == false);
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // assert(cache_entry.Dirty == false);
+ out_msg.Dirty := false;
+
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(tr_tokenReplacement, "tr", desc="Issue token writeback") {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // assert(cache_entry.Dirty == false);
out_msg.Dirty := false;
// always send the data?
- if (getCacheEntry(address).Tokens > 1) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.Type := CoherenceResponseType:WB_TOKENS;
- }
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.Type := CoherenceResponseType:WB_TOKENS;
}
- getCacheEntry(address).Tokens := 0;
}
+ cache_entry.Tokens := 0;
}
action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
+ assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := 1;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
out_msg.Dirty := false;
if (in_msg.isLocal) {
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
}
}
}
- getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
- assert(getCacheEntry(address).Tokens >= 1);
+ cache_entry.Tokens := cache_entry.Tokens - 1;
+ assert(cache_entry.Tokens >= 1);
}
action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and a token from cache to requestor") {
+ assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
- if (getCacheEntry(address).Tokens > N_tokens()) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
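+ // As owner (biased count above max_tokens() / 2), hand out N_tokens only
+ // when that still leaves the owner-sized count behind.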
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(in_msg.Requestor);
- out_msg.Tokens := N_tokens();
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Tokens := N_tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
out_msg.Dirty := false;
if (in_msg.isLocal) {
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
} else {
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
- getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - N_tokens();
+ cache_entry.Tokens := cache_entry.Tokens - N_tokens;
}
- else if (getCacheEntry(address).Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ else if (cache_entry.Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := 1;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
out_msg.Dirty := false;
if (in_msg.isLocal) {
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
} else {
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
- getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
+ cache_entry.Tokens := cache_entry.Tokens - 1;
}
}
-// assert(getCacheEntry(address).Tokens >= 1);
+// assert(cache_entry.Tokens >= 1);
}
action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ assert(is_valid(cache_entry));
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(in_msg.Requestor);
- assert(getCacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+ assert(cache_entry.Tokens > (max_tokens() / 2));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
if (in_msg.isLocal) {
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
} else {
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
// assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- if (getCacheEntry(address).Tokens > 0) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:ACK;
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getCacheEntry(address).Tokens;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getCacheEntry(address).Tokens > 0);
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+ assert(cache_entry.Tokens > (max_tokens() / 2));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(f_sendAckWithAllButNorOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getCacheEntry(address).Tokens > 0);
- if (getCacheEntry(address).Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ if (cache_entry.Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:ACK;
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens >= 1);
- if (getCacheEntry(address).Tokens > N_tokens()) {
- out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens();
+ assert(cache_entry.Tokens >= 1);
+ if (cache_entry.Tokens > N_tokens) {
+ out_msg.Tokens := cache_entry.Tokens - N_tokens;
} else {
- out_msg.Tokens := getCacheEntry(address).Tokens - 1;
+ out_msg.Tokens := cache_entry.Tokens - 1;
}
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
- if (getCacheEntry(address).Tokens > N_tokens()) {
- getCacheEntry(address).Tokens := N_tokens();
+ if (cache_entry.Tokens > N_tokens) {
+ cache_entry.Tokens := N_tokens;
} else {
- getCacheEntry(address).Tokens := 1;
+ cache_entry.Tokens := 1;
}
}
action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getCacheEntry(address).Tokens > 0);
- if (getCacheEntry(address).Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > ((max_tokens() / 2) + 1));
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens >= 1);
- if (getCacheEntry(address).Tokens > N_tokens()) {
- out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens();
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ out_msg.Tokens := cache_entry.Tokens - N_tokens;
} else {
- out_msg.Tokens := getCacheEntry(address).Tokens - 1;
+ out_msg.Tokens := cache_entry.Tokens - 1;
}
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+ assert(out_msg.Tokens > (max_tokens() / 2));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- if (getCacheEntry(address).Tokens > N_tokens()) {
- getCacheEntry(address).Tokens := N_tokens();
- } else {
- getCacheEntry(address).Tokens := 1;
- }
+ }
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ cache_entry.Tokens := N_tokens;
+ } else {
+ cache_entry.Tokens := 1;
}
}
+ action(fo_sendDataWithOwnerToken, "fo", desc="Send data and owner tokens") {
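+ // Handles the case where the owner holds exactly the owner token,
+ // (max_tokens() / 2) + 1: send the data and the whole count to the starver.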
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens == ((max_tokens() / 2) + 1));
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := cache_entry.Tokens;
+ assert(out_msg.Tokens > (max_tokens() / 2));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := 0;
+ }
+
action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
// assert(persistentTable.isLocked(address));
peek(responseNetwork_in, ResponseMsg) {
// assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
// FIXME, should use a 3rd vnet in some cases
- enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.Tokens := in_msg.Tokens;
out_msg.DataBlk := in_msg.DataBlk;
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
- DEBUG_EXPR(address);
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
- sequencer.readCallback(address, getCacheEntry(address).DataBlk, GenericMachineType:L1Cache, PrefetchBit:No);
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+
+ sequencer.readCallback(address,
+ GenericMachineType:L1Cache,
+ cache_entry.DataBlk);
+
}
action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
- DEBUG_EXPR(address);
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
- sequencer.readCallback(address, getCacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
+ sequencer.readCallback(address,
+ getNondirectHitMachType(address, in_msg.Sender),
+ cache_entry.DataBlk);
+
}
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
- DEBUG_EXPR(address);
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
- sequencer.writeCallback(address, getCacheEntry(address).DataBlk, GenericMachineType:L1Cache, PrefetchBit:No);
- getCacheEntry(address).Dirty := true;
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+
+ sequencer.writeCallback(address,
+ GenericMachineType:L1Cache,
+ cache_entry.DataBlk);
+
+ cache_entry.Dirty := true;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
}
action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
- DEBUG_EXPR(address);
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
- sequencer.writeCallback(address, getCacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
+
+ sequencer.writeCallback(address,
+ getNondirectHitMachType(address, in_msg.Sender),
+ cache_entry.DataBlk);
+
}
- getCacheEntry(address).Dirty := true;
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ cache_entry.Dirty := true;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
}
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(L1_TBEs);
L1_TBEs.allocate(address);
- L1_TBEs[address].IssueCount := 0;
- peek(mandatoryQueue_in, CacheMsg) {
- L1_TBEs[address].PC := in_msg.ProgramCounter;
- L1_TBEs[address].AccessType := cache_request_type_to_access_type(in_msg.Type);
- L1_TBEs[address].Prefetch := in_msg.Prefetch;
- L1_TBEs[address].AccessMode := in_msg.AccessMode;
+ set_tbe(L1_TBEs[address]);
+ tbe.IssueCount := 0;
+ peek(mandatoryQueue_in, RubyRequest) {
+ tbe.PC := in_msg.ProgramCounter;
+ tbe.AccessType := cache_request_type_to_access_type(in_msg.Type);
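+ // Record atomics in the TBE so the use-timeout logic can skip the
+ // migratory-sharing handoff for them when no_mig_atomic is set.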
+ if (in_msg.Type == RubyRequestType:ATOMIC) {
+ tbe.IsAtomic := true;
+ }
+ tbe.Prefetch := in_msg.Prefetch;
+ tbe.AccessMode := in_msg.AccessMode;
}
- L1_TBEs[address].IssueTime := get_time();
+ tbe.IssueTime := get_time();
}
+ action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
+ }
+ }
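+ // Two timers govern an outstanding request: the reissue timer bounds
+ // how long a transient request waits before being reissued (and
+ // eventually escalated to a persistent request), while the use timer
+ // lets the processor hold freshly received tokens long enough to
+ // complete at least one access.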
action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
if (reissueTimerTable.isSet(address)) {
reissueTimerTable.unset(address);
}
}
-
-
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
mandatoryQueue_in.dequeue();
}
action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:INV;
out_msg.Tokens := 0;
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.DestMachine := MachineType:L2Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
assert(in_msg.Tokens != 0);
- DEBUG_EXPR("MRM_DEBUG L1 received tokens");
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.Tokens);
- getCacheEntry(address).Tokens := getCacheEntry(address).Tokens + in_msg.Tokens;
- DEBUG_EXPR(getCacheEntry(address).Tokens);
-
- if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
- getCacheEntry(address).Dirty := true;
+ DPRINTF(RubySlicc, "L1 received tokens for address: %s, tokens: %d\n",
+ in_msg.Address, in_msg.Tokens);
+ cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
+ DPRINTF(RubySlicc, "%d\n", cache_entry.Tokens);
+
+ if (cache_entry.Dirty == false && in_msg.Dirty) {
+ cache_entry.Dirty := true;
}
}
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- if (L1_TBEs[address].WentPersistent) {
+ assert(is_valid(tbe));
+ if (tbe.WentPersistent) {
// assert(starving == true);
outstandingRequests := outstandingRequests - 1;
- enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
out_msg.Address := address;
out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, only one
+ // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Persistent_Control;
}
}
// Update average latency
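+ // Only requests satisfied externally on their first issue contribute
+ // to the estimate; with dynamic_timeout_enabled this average sizes the
+ // reissue timeout.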
- if (L1_TBEs[address].IssueCount <= 1) {
- if (L1_TBEs[address].ExternalResponse == true) {
- updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(L1_TBEs[address].IssueTime));
+ if (tbe.IssueCount <= 1) {
+ if (tbe.ExternalResponse == true) {
+ updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(tbe.IssueTime));
}
}
// Profile
- //if (L1_TBEs[address].WentPersistent) {
- // profile_token_retry(address, L1_TBEs[address].AccessType, 2);
+ //if (tbe.WentPersistent) {
+ // profile_token_retry(address, tbe.AccessType, 2);
//}
//else {
- // profile_token_retry(address, L1_TBEs[address].AccessType, 1);
+ // profile_token_retry(address, tbe.AccessType, 1);
//}
- profile_token_retry(address, L1_TBEs[address].AccessType, L1_TBEs[address].IssueCount);
+ //profile_token_retry(address, tbe.AccessType, tbe.IssueCount);
L1_TBEs.deallocate(address);
+ unset_tbe();
}
action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
- if (getCacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:ACK;
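+ // If the collected tokens include the owner token (count above
+ // max_tokens() / 2), the reply must carry the data block as well,
+ // so send DATA_OWNER instead of a bare ACK.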
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(in_msg.Requestor);
- assert(getCacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getCacheEntry(address).Tokens;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(u_writeDataToCache, "u", desc="Write data to cache") {
peek(responseNetwork_in, ResponseMsg) {
- getCacheEntry(address).DataBlk := in_msg.DataBlk;
- if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
- getCacheEntry(address).Dirty := in_msg.Dirty;
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ if (cache_entry.Dirty == false && in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
}
}
}
action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ assert(getTokens(cache_entry) == 0);
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.deallocate(address);
} else {
L1IcacheMemory.deallocate(address);
}
+ unset_cache_entry();
}
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
- if (L1DcacheMemory.isTagPresent(address) == false) {
- L1DcacheMemory.allocate(address);
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
}
}
action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
- if (L1IcacheMemory.isTagPresent(address) == false) {
- L1IcacheMemory.allocate(address);
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
}
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
- // profile_miss(in_msg, id);
+ peek(mandatoryQueue_in, RubyRequest) {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory.profileMiss(in_msg);
+ } else {
+ L1IcacheMemory.profileMiss(in_msg);
+ }
}
}
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
peek(responseNetwork_in, ResponseMsg) {
- assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
+ assert(is_valid(cache_entry));
+ assert(cache_entry.DataBlk == in_msg.DataBlk);
}
}
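+
+ // stall_and_wait parks the head request on a per-address waiting list
+ // instead of endlessly recycling it through the mandatory queue; the
+ // wake-up actions below release parked requests once the block leaves
+ // its transient state.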
+ action(zz_stallAndWaitMandatoryQueue, "\z", desc="Park the head of the mandatory queue until this address wakes up.") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
+ }
+ stall_and_wait(mandatoryQueue_in, address);
+ }
- action(z_stall, "z", desc="Stall") {
-
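+ // wakeUpBuffers(address) releases only the requests stalled on this
+ // address; wakeUpAllBuffers() releases every stalled request and is
+ // used after a replacement frees cache resources.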
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
}
- action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
- mandatoryQueue_in.recycle();
+ action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
+ wakeUpAllBuffers();
}
//*****************************************************
// Transitions for Load/Store/L2_Replacement from transient states
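+ // These requests are parked via zz_stallAndWaitMandatoryQueue and
+ // released by the wake-up actions once the block settles.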
transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L1_Replacement) {
- zz_recycleMandatoryQueue;
+ ta_traceStalledAddress;
+ zz_stallAndWaitMandatoryQueue;
}
- transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, Store) {
- zz_recycleMandatoryQueue;
+ transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, {Store, Atomic}) {
+ zz_stallAndWaitMandatoryQueue;
}
transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
- zz_recycleMandatoryQueue;
+ zz_stallAndWaitMandatoryQueue;
}
k_popMandatoryQueue;
}
- transition(NP, Store, IM) {
+ transition(NP, {Store, Atomic}, IM) {
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueWriteRequest;
m_popRequestQueue;
}
- transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
+ transition(NP, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
l_popPersistentQueue;
}
k_popMandatoryQueue;
}
- transition(I, Store, IM) {
+ transition(I, {Store, Atomic}, IM) {
i_allocateTBE;
b_issueWriteRequest;
uu_profileMiss;
}
transition(I, L1_Replacement) {
- cc_sharedReplacement;
+ ta_traceStalledAddress;
+ tr_tokenReplacement;
gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
}
transition(I, {Transient_GETX, Transient_Local_GETX}) {
m_popRequestQueue;
}
- transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
+ transition(I, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
e_sendAckWithCollectedTokens;
l_popPersistentQueue;
}
- transition(I_L, {Persistent_GETX, Persistent_GETS}) {
+ transition(I_L, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}) {
l_popPersistentQueue;
}
k_popMandatoryQueue;
}
- transition(S, Store, SM) {
+ transition(S, {Store, Atomic}, SM) {
i_allocateTBE;
b_issueWriteRequest;
uu_profileMiss;
}
transition(S, L1_Replacement, I) {
+ ta_traceStalledAddress;
cc_sharedReplacement; // Only needed in some cases
gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
}
transition(S, {Transient_GETX, Transient_Local_GETX}, I) {
l_popPersistentQueue;
}
- transition(S, Persistent_GETS, S_L) {
+ transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
f_sendAckWithAllButNorOneTokens;
l_popPersistentQueue;
}
- transition(S_L, Persistent_GETS) {
+ transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
l_popPersistentQueue;
}
k_popMandatoryQueue;
}
- transition(O, Store, OM) {
+ transition(O, {Store, Atomic}, OM) {
i_allocateTBE;
b_issueWriteRequest;
uu_profileMiss;
}
transition(O, L1_Replacement, I) {
+ ta_traceStalledAddress;
c_ownedReplacement;
gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
}
transition(O, {Transient_GETX, Transient_Local_GETX}, I) {
l_popPersistentQueue;
}
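+ // Persistent_GETS_Last_Token fires when this cache holds only its last
+ // (owner) token, so it cannot keep a token for itself: it must ship the
+ // data and owner token to the starver and fall back to locked-invalid.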
+ transition(O, Persistent_GETS_Last_Token, I_L) {
+ fo_sendDataWithOwnerToken;
+ l_popPersistentQueue;
+ }
+
transition(O, Transient_GETS) {
d_sendDataWithToken;
m_popRequestQueue;
k_popMandatoryQueue;
}
- transition({MM, MM_W}, Store) {
+ transition({MM_W}, {Store, Atomic}) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
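+ // With no_mig_atomic, an atomic access leaves the block in M rather
+ // than MM so a later remote request does not trigger the migratory
+ // handoff for it.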
+ transition(MM, Atomic, M) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(MM, L1_Replacement, I) {
+ ta_traceStalledAddress;
c_ownedReplacement;
gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
}
transition(MM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}, I) {
l_popPersistentQueue;
}
-
transition(MM_W, Use_TimeoutNoStarvers, MM) {
s_deallocateTBE;
jj_unsetUseTimer;
+ kd_wakeUpDependents;
+ }
+
+ transition(MM_W, Use_TimeoutNoStarvers_NoMig, M) {
+ s_deallocateTBE;
+ jj_unsetUseTimer;
+ kd_wakeUpDependents;
}
// Transitions from Dirty Exclusive
k_popMandatoryQueue;
}
+ transition(M, Atomic) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
transition(M_W, Store, MM_W) {
hh_store_hit;
k_popMandatoryQueue;
}
+ transition(M_W, Atomic) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
transition(M, L1_Replacement, I) {
+ ta_traceStalledAddress;
c_ownedReplacement;
gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
}
transition(M, {Transient_GETX, Transient_Local_GETX}, I) {
}
// someone unlocked during timeout
- transition(M_W, Use_TimeoutNoStarvers, M) {
+ transition(M_W, {Use_TimeoutNoStarvers, Use_TimeoutNoStarvers_NoMig}, M) {
s_deallocateTBE;
jj_unsetUseTimer;
+ kd_wakeUpDependents;
}
transition(M_W, Use_TimeoutStarverX, I_L) {
m_popRequestQueue;
}
- transition(IS, {Persistent_GETX, Persistent_GETS}, IS_L) {
+ transition(IS, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IS_L) {
e_sendAckWithCollectedTokens;
l_popPersistentQueue;
}
l_popPersistentQueue;
}
- transition(IM, {Persistent_GETX, Persistent_GETS}, IM_L) {
+ transition(IM, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IM_L) {
e_sendAckWithCollectedTokens;
l_popPersistentQueue;
}
l_popPersistentQueue;
}
- transition(SM, Persistent_GETS, SM_L) {
+ transition(SM, {Persistent_GETS, Persistent_GETS_Last_Token}, SM_L) {
f_sendAckWithAllButNorOneTokens;
l_popPersistentQueue;
}
- transition(SM_L, Persistent_GETS) {
+ transition(SM_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
l_popPersistentQueue;
}
l_popPersistentQueue;
}
+ transition(OM, Persistent_GETS_Last_Token, IM_L) {
+ fo_sendDataWithOwnerToken;
+ l_popPersistentQueue;
+ }
+
// Transitions from IM/SM
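+ // The kd_wakeUpDependents calls added below release requests parked on
+ // this address as the fill completes.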
transition({IM, SM}, Ack) {
o_scheduleUseTimeout;
j_unsetReissueTimer;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition(SM, Data_Shared) {
o_scheduleUseTimeout;
j_unsetReissueTimer;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition({IM, SM}, {Transient_GETX, Transient_Local_GETX}, IM) { // We don't have the data yet, but we might have collected some tokens. We give them up here to avoid livelock
o_scheduleUseTimeout;
j_unsetReissueTimer;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition(OM, Data_Shared) {
o_scheduleUseTimeout;
j_unsetReissueTimer;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition(OM, Request_Timeout) {
s_deallocateTBE;
j_unsetReissueTimer;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition(IS, Data_Owner, O) {
s_deallocateTBE;
j_unsetReissueTimer;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition(IS, Data_All_Tokens, M_W) {
o_scheduleUseTimeout;
j_unsetReissueTimer;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition(IS, Request_Timeout) {
k_popMandatoryQueue;
}
- transition(I_L, Store, IM_L) {
+ transition(I_L, {Store, Atomic}, IM_L) {
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueWriteRequest;
// Transitions from S_L
- transition(S_L, Store, SM_L) {
+ transition(S_L, {Store, Atomic}, SM_L) {
i_allocateTBE;
b_issueWriteRequest;
uu_profileMiss;
j_unsetReissueTimer;
o_scheduleUseTimeout;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition(SM_L, Data_All_Tokens, S_L) {
j_unsetReissueTimer;
o_scheduleUseTimeout;
n_popResponseQueue;
+ kd_wakeUpDependents;
}
transition(I_L, Own_Lock_or_Unlock, I) {
l_popPersistentQueue;
+ kd_wakeUpDependents;
}
transition(S_L, Own_Lock_or_Unlock, S) {
l_popPersistentQueue;
+ kd_wakeUpDependents;
}
transition(IM_L, Own_Lock_or_Unlock, IM) {
l_popPersistentQueue;
+ kd_wakeUpDependents;
}
transition(IS_L, Own_Lock_or_Unlock, IS) {
l_popPersistentQueue;
+ kd_wakeUpDependents;
}
transition(SM_L, Own_Lock_or_Unlock, SM) {
l_popPersistentQueue;
+ kd_wakeUpDependents;
}
}