--- /dev/null
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(L1Cache, "AMD Hammer-like protocol") {
+
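+  // NOTE: This is a broadcast protocol in the style of the AMD Opteron
+  // (Hammer): the directory keeps no sharer list, each miss is forwarded
+  // to every other node, and the requestor counts one response per node
+  // plus one for memory before completing (see NumPendingMsgs below).
+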
+ // STATES
+ enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+ I, desc="Idle";
+ S, desc="Shared";
+ O, desc="Owned";
+ M, desc="Modified (dirty)";
+ MM, desc="Modified (dirty and locally modified)";
+
+ // Transient States
+ IM, "IM", desc="Issued GetX";
+ SM, "SM", desc="Issued GetX, we still have an old copy of the line";
+ OM, "OM", desc="Issued GetX, received data";
+ ISM, "ISM", desc="Issued GetX, received data, waiting for all acks";
+ M_W, "M^W", desc="Issued GetS, received exclusive data";
+ MM_W, "MM^W", desc="Issued GetX, received exclusive data";
+ IS, "IS", desc="Issued GetS";
+ SS, "SS", desc="Issued GetS, received data, waiting for all acks";
+ OI, "OI", desc="Issued PutO, waiting for ack";
+ MI, "MI", desc="Issued PutX, waiting for ack";
+ II, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
+ }
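+
+  // NOTE: The "^W" states hold a block whose exclusive data has already
+  // arrived while the remaining acks drain; loads and stores can still
+  // hit in them (see the M_W / MM_W transitions below).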
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ Load, desc="Load request from the processor";
+ Ifetch, desc="I-fetch request from the processor";
+ Store, desc="Store request from the processor";
+ L2_Replacement, desc="L2 Replacement";
+ L1_to_L2, desc="L1 to L2 transfer";
+ L2_to_L1D, desc="L2 to L1-Data transfer";
+ L2_to_L1I, desc="L2 to L1-Instruction transfer";
+
+ // Requests
+ Other_GETX, desc="A GetX from another processor";
+ Other_GETS, desc="A GetS from another processor";
+
+ // Responses
+ Ack, desc="Received an ack message";
+ Shared_Ack, desc="Received an ack message, responder has a shared copy";
+ Data, desc="Received a data message";
+ Shared_Data, desc="Received a data message, responder has a shared copy";
+    Exclusive_Data, desc="Received a data message; the responder had an exclusive copy and handed it to us";
+
+ Writeback_Ack, desc="Writeback O.K. from directory";
+ Writeback_Nack, desc="Writeback not O.K. from directory";
+
+ // Triggers
+ All_acks, desc="Received all required data and message acks";
+ All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...") {
+ Address Address, desc="Address of this block, required by CacheMemory";
+ Time LastRef, desc="Last time this block was referenced, required by CacheMemory";
+ AccessPermission Permission, desc="Access permission for this block, required by CacheMemory";
+ DataBlock DataBlk, desc="data for the block, required by CacheMemory";
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
+ bool Sharers, desc="On a GetS, did we find any other sharers in the system";
+ }
+
+ external_type(NewCacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ }
+
+ external_type(NewTBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ NewTBETable TBEs, template_hack="<L1Cache_TBE>";
+ NewCacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,"L1I"';
+ NewCacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,"L1D"';
+ NewCacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,"L2"';
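+
+  // The two L1s and the L2 are kept mutually exclusive: a block resides
+  // in at most one of L1I, L1D, and L2 at a time (see the assertions in
+  // getState/setState below), so a lookup simply probes all three.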
+
+ Entry getCacheEntry(Address addr), return_by_ref="yes" {
+ if (L2cacheMemory.isTagPresent(addr)) {
+ return L2cacheMemory[addr];
+ } else if (L1DcacheMemory.isTagPresent(addr)) {
+ return L1DcacheMemory[addr];
+ } else {
+ return L1IcacheMemory[addr];
+ }
+ }
+
+ void changePermission(Address addr, AccessPermission permission) {
+ if (L2cacheMemory.isTagPresent(addr)) {
+ return L2cacheMemory.changePermission(addr, permission);
+ } else if (L1DcacheMemory.isTagPresent(addr)) {
+ return L1DcacheMemory.changePermission(addr, permission);
+ } else {
+ return L1IcacheMemory.changePermission(addr, permission);
+ }
+ }
+
+ bool isCacheTagPresent(Address addr) {
+ return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+ }
+
+ State getState(Address addr) {
+ assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+ assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+ assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+
+ if(TBEs.isPresent(addr)) {
+ return TBEs[addr].TBEState;
+ } else if (isCacheTagPresent(addr)) {
+ return getCacheEntry(addr).CacheState;
+ }
+ return State:I;
+ }
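+
+  // The TBE is checked first: while a transaction is in flight, the
+  // authoritative (transient) state lives in the TBE, not the cache entry.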
+
+ void setState(Address addr, State state) {
+ assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+ assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+ assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+
+ if (TBEs.isPresent(addr)) {
+ TBEs[addr].TBEState := state;
+ }
+
+ if (isCacheTagPresent(addr)) {
+ getCacheEntry(addr).CacheState := state;
+
+ // Set permission
+ if ((state == State:MM) ||
+ (state == State:MM_W)) {
+ changePermission(addr, AccessPermission:Read_Write);
+ } else if (state == State:S ||
+ state == State:O ||
+ state == State:M ||
+ state == State:M_W ||
+ state == State:SM ||
+ state == State:ISM ||
+ state == State:OM ||
+ state == State:SS) {
+ changePermission(addr, AccessPermission:Read_Only);
+ } else {
+ changePermission(addr, AccessPermission:Invalid);
+ }
+ }
+ }
+
+ Event mandatory_request_type_to_event(CacheRequestType type) {
+ if (type == CacheRequestType:LD) {
+ return Event:Load;
+ } else if (type == CacheRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
+ return Event:Store;
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+
+ MessageBuffer triggerQueue, ordered="true";
+
+ // ** OUT_PORTS **
+
+ out_port(requestNetwork_out, RequestMsg, requestFromCache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromCache);
+ out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+ // ** IN_PORTS **
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+ if (triggerQueue_in.isReady()) {
+ peek(triggerQueue_in, TriggerMsg) {
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_acks, in_msg.Address);
+ } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
+ trigger(Event:All_acks_no_sharers, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Nothing from the request network
+
+ // Forward Network
+ in_port(forwardToCache_in, RequestMsg, forwardToCache) {
+ if (forwardToCache_in.isReady()) {
+ peek(forwardToCache_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:Other_GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Other_GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+ trigger(Event:Writeback_Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
+ trigger(Event:Writeback_Nack, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Response Network
+ in_port(responseToCache_in, ResponseMsg, responseToCache) {
+ if (responseToCache_in.isReady()) {
+ peek(responseToCache_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
+ trigger(Event:Shared_Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Shared_Data, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Exclusive_Data, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Nothing from the unblock network
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+
+        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
+
+ if (in_msg.Type == CacheRequestType:IFETCH) {
+          // *** INSTRUCTION ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ // The block is in the wrong L1, try to write it to the L2
+ if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+ trigger(Event:L1_to_L2, in_msg.Address);
+ } else {
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag is present in this L1, so the L1 services the request; exclusion guarantees the block is not also in the L2
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 doesn't have the line, but we have space for it in the L1
+ if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+ // L2 has it (maybe not with the right permissions)
+ trigger(Event:L2_to_L1I, in_msg.Address);
+ } else {
+ // We have room, the L2 doesn't have it, so the L1 fetches the line
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ }
+ } else {
+ // No room in the L1, so we need to make room
+ if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
+ // The L2 has room, so we move the line from the L1 to the L2
+ trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ // The L2 does not have room, so we replace a line from the L2
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));
+ }
+ }
+ }
+ } else {
+ // *** DATA ACCESS ***
+
+ // Check to see if it is in the OTHER L1
+ if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ // The block is in the wrong L1, try to write it to the L2
+ if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+ trigger(Event:L1_to_L2, in_msg.Address);
+ } else {
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ }
+ }
+
+ if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+            // The tag is present in this L1, so the L1 services the request; exclusion guarantees the block is not also in the L2
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ } else {
+ if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+              // L1 doesn't have the line, but we have space for it in the L1
+ if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+ // L2 has it (maybe not with the right permissions)
+ trigger(Event:L2_to_L1D, in_msg.Address);
+ } else {
+ // We have room, the L2 doesn't have it, so the L1 fetches the line
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ }
+ } else {
+ // No room in the L1, so we need to make room
+ if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
+ // The L2 has room, so we move the line from the L1 to the L2
+ trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ // The L2 does not have room, so we replace a line from the L2
+ trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := id;
+ out_msg.Destination.add(map_address_to_node(address));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
+ }
+ }
+
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := id;
+ out_msg.Destination.add(map_address_to_node(address));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
+ }
+ }
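+
+  // Ack accounting: each response carries an Acks count that is
+  // subtracted from NumPendingMsgs (see m_decrementNumberOfMessages).
+  // Plain acks and directory data carry Acks := 1; a cache responding
+  // with data sends Acks := 2, covering both its own ack and the memory
+  // response that the directory omits when a cache is the owner.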
+
+ action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := id;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Acks := 2;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(d_issuePUT, "d", desc="Issue PUT") {
+ enqueue(requestNetwork_out, RequestMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUT;
+ out_msg.Requestor := id;
+ out_msg.Destination.add(map_address_to_node(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(e_sendData, "e", desc="Send data from cache to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := id;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Acks := 2;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := id;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Acks := 2;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(f_sendAck, "f", desc="Send ack from cache to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := id;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Acks := 1;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK_SHARED;
+ out_msg.Sender := id;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Acks := 1;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(g_sendUnblock, "g", desc="Send unblock to memory") {
+ enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK;
+ out_msg.Sender := id;
+ out_msg.Destination.add(map_address_to_node(address));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+  action(h_load_hit, "h", desc="Notify sequencer that the load completed.") {
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ sequencer.readCallback(address, getCacheEntry(address).DataBlk);
+ }
+
+  action(hh_store_hit, "\h", desc="Notify sequencer that the store completed.") {
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
+ getCacheEntry(address).Dirty := true;
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
+ TBEs[address].Dirty := getCacheEntry(address).Dirty;
+ TBEs[address].Sharers := false;
+ }
+
+ action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
+ triggerQueue_in.dequeue();
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
+ forwardToCache_in.dequeue();
+ }
+
+ action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
+ peek(responseToCache_in, ResponseMsg) {
+ assert(in_msg.Acks > 0);
+ DEBUG_EXPR(TBEs[address].NumPendingMsgs);
+ TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
+ DEBUG_EXPR(TBEs[address].NumPendingMsgs);
+ }
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseToCache_in.dequeue();
+ }
+
+ action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+ if (TBEs[address].NumPendingMsgs == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.Address := address;
+ if (TBEs[address].Sharers) {
+ out_msg.Type := TriggerType:ALL_ACKS;
+ } else {
+ out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
+ }
+ }
+ }
+ }
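+
+  // Completion is signaled through the ordered trigger queue: once
+  // NumPendingMsgs reaches zero, the controller sends itself a message so
+  // that All_acks / All_acks_no_sharers fires as an ordinary event.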
+
+ action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
+ TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
+ }
+
+ action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
+ TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs + 1;
+ }
+
+ action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := id;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.Dirty := TBEs[address].Dirty;
+ out_msg.Acks := 2;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
+ enqueue(unblockNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Sender := id;
+ out_msg.Destination.add(map_address_to_node(address));
+ out_msg.Dirty := TBEs[address].Dirty;
+ if (TBEs[address].Dirty) {
+ out_msg.Type := CoherenceResponseType:WB_DIRTY;
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ } else {
+ out_msg.Type := CoherenceResponseType:WB_CLEAN;
+ // NOTE: in a real system this would not send data. We send
+ // data here only so we can check it at the memory
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(r_setSharerBit, "r", desc="We saw other sharers") {
+ TBEs[address].Sharers := true;
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ }
+
+ action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
+ enqueue(unblockNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Sender := id;
+ out_msg.Destination.add(map_address_to_node(address));
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.Dirty := TBEs[address].Dirty;
+ if (TBEs[address].Dirty) {
+ out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ } else {
+ out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
+ // NOTE: in a real system this would not send data. We send
+ // data here only so we can check it at the memory
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseToCache_in, ResponseMsg) {
+ getCacheEntry(address).DataBlk := in_msg.DataBlk;
+ getCacheEntry(address).Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
+ peek(responseToCache_in, ResponseMsg) {
+ assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
+ getCacheEntry(address).DataBlk := in_msg.DataBlk;
+ getCacheEntry(address).Dirty := in_msg.Dirty;
+ }
+ }
+
+  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate L1 cache block. Marks the block invalid, allowing a replacement in parallel with a fetch.") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory.deallocate(address);
+ } else {
+ L1IcacheMemory.deallocate(address);
+ }
+ }
+
+  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to the tag of the requested block.") {
+ if (L1DcacheMemory.isTagPresent(address) == false) {
+ L1DcacheMemory.allocate(address);
+ }
+ }
+
+  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to the tag of the requested block.") {
+ if (L1IcacheMemory.isTagPresent(address) == false) {
+ L1IcacheMemory.allocate(address);
+ }
+ }
+
+  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to the tag of the requested block.") {
+ L2cacheMemory.allocate(address);
+ }
+
+  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Marks the block not present, allowing a replacement in parallel with a fetch.") {
+ L2cacheMemory.deallocate(address);
+ }
+
+ action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L2cacheMemory[address] := L1DcacheMemory[address];
+ } else {
+ L2cacheMemory[address] := L1IcacheMemory[address];
+ }
+ }
+
+ action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
+ if (L1DcacheMemory.isTagPresent(address)) {
+ L1DcacheMemory[address] := L2cacheMemory[address];
+ } else {
+ L1IcacheMemory[address] := L2cacheMemory[address];
+ }
+ }
+
+ action(uu_profileMiss, "\u", desc="Profile the demand miss") {
+ peek(mandatoryQueue_in, CacheMsg) {
+ profile_miss(in_msg, id);
+ }
+ }
+
+ action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
+ mandatoryQueue_in.recycle();
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/L2_Replacement from transient states
+ transition({IM, SM, ISM, OM, IS, SS, OI, MI, II}, {Store, L2_Replacement}) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({M_W, MM_W}, {L2_Replacement}) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IM, IS, OI, MI, II}, {Load, Ifetch}) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II}, L1_to_L2) {
+ zz_recycleMandatoryQueue;
+ }
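+
+  // The transitions above recycle requests that cannot be serviced in a
+  // transient state: the request moves to the back of the mandatory queue
+  // rather than stalling, so requests to other blocks can make progress.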
+
+ // Transitions moving data between the L1 and L2 caches
+ transition({I, S, O, M, MM}, L1_to_L2) {
+ vv_allocateL2CacheBlock;
+ ss_copyFromL1toL2; // Not really needed for state I
+ gg_deallocateL1CacheBlock;
+ }
+
+ transition({I, S, O, M, MM}, L2_to_L1D) {
+ ii_allocateL1DCacheBlock;
+ tt_copyFromL2toL1; // Not really needed for state I
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition({I, S, O, M, MM}, L2_to_L1I) {
+ jj_allocateL1ICacheBlock;
+ tt_copyFromL2toL1; // Not really needed for state I
+ rr_deallocateL2CacheBlock;
+ }
+
+ // Transitions from Idle
+ transition(I, Load, IS) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch, IS) {
+ jj_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Store, IM) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, L2_Replacement) {
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(I, {Other_GETX, Other_GETS}) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ // Transitions from Shared
+ transition({S, SM, ISM}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store, SM) {
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, L2_Replacement, I) {
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(S, Other_GETX, I) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(S, Other_GETS) {
+ ff_sendAckShared;
+ l_popForwardQueue;
+ }
+
+ // Transitions from Owned
+ transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Store, OM) {
+ i_allocateTBE;
+ b_issueGETX;
+ p_decrementNumberOfMessagesByOne;
+ uu_profileMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, L2_Replacement, OI) {
+ i_allocateTBE;
+ d_issuePUT;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(O, Other_GETX, I) {
+ e_sendData;
+ l_popForwardQueue;
+ }
+
+ transition(O, Other_GETS) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ // Transitions from Modified
+ transition(MM, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, L2_Replacement, MI) {
+ i_allocateTBE;
+ d_issuePUT;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MM, Other_GETX, I) {
+ c_sendExclusiveData;
+ l_popForwardQueue;
+ }
+
+ transition(MM, Other_GETS, I) {
+ c_sendExclusiveData;
+ l_popForwardQueue;
+ }
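+
+  // Note the asymmetry with state M below: a locally modified block (MM)
+  // is handed over exclusively even on a GETS rather than being
+  // downgraded to O, presumably to favor migratory sharing.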
+
+ // Transitions from Dirty Exclusive
+ transition(M, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store, MM) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, L2_Replacement, MI) {
+ i_allocateTBE;
+ d_issuePUT;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, Other_GETX, I) {
+ c_sendExclusiveData;
+ l_popForwardQueue;
+ }
+
+ transition(M, Other_GETS, O) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ // Transitions from IM
+
+ transition(IM, {Other_GETX, Other_GETS}) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(IM, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data, ISM) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Exclusive_Data, MM_W) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ hh_store_hit;
+ n_popResponseQueue;
+ }
+
+ // Transitions from SM
+ transition(SM, Other_GETS) {
+ ff_sendAckShared;
+ l_popForwardQueue;
+ }
+
+ transition(SM, Other_GETX, IM) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(SM, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data, ISM) {
+ v_writeDataToCacheVerify;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ // Transitions from ISM
+ transition(ISM, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(ISM, All_acks_no_sharers, MM) {
+ hh_store_hit;
+ g_sendUnblock;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // Transitions from OM
+
+ transition(OM, Other_GETX, IM) {
+ e_sendData;
+ pp_incrementNumberOfMessagesByOne;
+ l_popForwardQueue;
+ }
+
+ transition(OM, Other_GETS) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ transition(OM, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(OM, {All_acks, All_acks_no_sharers}, MM) {
+ hh_store_hit;
+ g_sendUnblock;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // Transitions from IS
+
+ transition(IS, {Other_GETX, Other_GETS}) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(IS, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Shared_Ack) {
+ m_decrementNumberOfMessages;
+ r_setSharerBit;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Data, SS) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ h_load_hit;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Exclusive_Data, M_W) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ h_load_hit;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Shared_Data, SS) {
+ u_writeDataToCache;
+ r_setSharerBit;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ h_load_hit;
+ n_popResponseQueue;
+ }
+
+ // Transitions from SS
+
+ transition(SS, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SS, Shared_Ack) {
+ m_decrementNumberOfMessages;
+ r_setSharerBit;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SS, All_acks, S) {
+ g_sendUnblock;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ transition(SS, All_acks_no_sharers, S) {
+    // Note: the directory might still be the owner, so we go to S rather than claiming exclusivity
+ g_sendUnblock;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // Transitions from MM_W
+
+ transition(MM_W, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM_W, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(MM_W, All_acks_no_sharers, MM) {
+ g_sendUnblock;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // Transitions from M_W
+
+ transition(M_W, Store, MM_W) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M_W, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(M_W, All_acks_no_sharers, M) {
+ g_sendUnblock;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ }
+
+ // Transitions from OI/MI
+
+ transition({OI, MI}, Other_GETX, II) {
+ q_sendDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition({OI, MI}, Other_GETS, OI) {
+ q_sendDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition(MI, Writeback_Ack, I) {
+ t_sendExclusiveDataFromTBEToMemory;
+ s_deallocateTBE;
+ l_popForwardQueue;
+ }
+
+ transition(OI, Writeback_Ack, I) {
+ qq_sendDataFromTBEToMemory;
+ s_deallocateTBE;
+ l_popForwardQueue;
+ }
+
+ // Transitions from II
+ transition(II, {Other_GETS, Other_GETX}, II) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(II, Writeback_Ack, I) {
+ g_sendUnblock;
+ s_deallocateTBE;
+ l_popForwardQueue;
+ }
+
+ transition(II, Writeback_Nack, I) {
+ s_deallocateTBE;
+ l_popForwardQueue;
+ }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(Directory, "AMD Hammer-like protocol") {
+
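+  // NOTE: The directory tracks no sharers; it only records whether memory
+  // can supply the block (E/O) or some cache owns it (NO), and it blocks
+  // (NO_B / O_B / WB) from answering a request until the requestor's
+  // Unblock arrives, recycling any requests that race in meanwhile.
+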
+ // STATES
+ enumeration(State, desc="Directory states", default="Directory_State_E") {
+ // Base states
+ NO, desc="Not Owner";
+ O, desc="Owner";
+ E, desc="Exclusive Owner (we can provide the data in exclusive)";
+ NO_B, "NO^B", desc="Not Owner, Blocked";
+ O_B, "O^B", desc="Owner, Blocked";
+ WB, desc="Blocked on a writeback";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETX, desc="A GETX arrives";
+ GETS, desc="A GETS arrives";
+ PUT, desc="A PUT arrives";
+ Unblock, desc="An unblock message arrives";
+ Writeback_Clean, desc="The final part of a PutX (no data)";
+ Writeback_Dirty, desc="The final part of a PutX (data)";
+ Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
+ Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory;
+
+ State getState(Address addr) {
+ return directory[addr].DirectoryState;
+ }
+
+ void setState(Address addr, State state) {
+ directory[addr].DirectoryState := state;
+ }
+
+ // ** OUT_PORTS **
+ out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+  out_port(requestQueue_out, RequestMsg, requestToDir); // For recycling requests
+
+ // ** IN_PORTS **
+
+ in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
+ if (unblockNetwork_in.isReady()) {
+ peek(unblockNetwork_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ trigger(Event:Unblock, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
+ trigger(Event:Writeback_Clean, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
+ trigger(Event:Writeback_Dirty, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
+ trigger(Event:Writeback_Exclusive_Clean, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
+ trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(requestQueue_in, RequestMsg, requestToDir) {
+ if (requestQueue_in.isReady()) {
+ peek(requestQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:PUT) {
+ trigger(Event:PUT, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_NACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := id;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Dirty := false; // By definition, the block is now clean
+ out_msg.Acks := 1;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(dd_sendExclusiveData, "\d", desc="Send exclusive data to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := id;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Dirty := false; // By definition, the block is now clean
+ out_msg.Acks := 1;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(f_forwardRequest, "f", desc="Forward requests") {
+ if (numberOfNodes() > 1) {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.broadcast(); // Send to everyone, but...
+ out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+ }
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ requestQueue_in.dequeue();
+ }
+
+ action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+ unblockNetwork_in.dequeue();
+ }
+
+ action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(in_msg.Dirty);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(in_msg.Dirty == false);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+
+ // NOTE: The following check would not be valid in a real
+ // implementation. We include the data in the "dataless"
+ // message so we can assert the clean data matches the datablock
+ // in memory
+ assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
+ }
+ }
+
+ // action(z_stall, "z", desc="Cannot be handled right now.") {
+ // Special name recognized as do nothing case
+ // }
+
+ action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+ requestQueue_in.recycle();
+ }
+
+ // TRANSITIONS
+
+ transition(E, GETX, NO_B) {
+ dd_sendExclusiveData;
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(E, GETS, NO_B) {
+ dd_sendExclusiveData;
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+  // Transitions from Owner
+ transition(O, GETX, NO_B) {
+ d_sendData;
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, GETS, O_B) {
+ d_sendData;
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+  // Transitions from Not Owner
+ transition(NO, GETX, NO_B) {
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(NO, GETS, NO_B) {
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(NO, PUT, WB) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({O, E}, PUT) {
+ b_sendWriteBackNack;
+ i_popIncomingRequestQueue;
+ }
+
+ // Blocked states
+ transition({NO_B, O_B, WB}, {GETS, GETX, PUT}) {
+ zz_recycleRequest;
+ }
+
+ transition(NO_B, Unblock, NO) {
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(O_B, Unblock, O) {
+ j_popIncomingUnblockQueue;
+ }
+
+ // WB
+ transition(WB, Writeback_Dirty, O) {
+ l_writeDataToMemory;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(WB, Writeback_Exclusive_Dirty, E) {
+ l_writeDataToMemory;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(WB, Writeback_Clean, O) {
+ ll_checkIncomingWriteback;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(WB, Writeback_Exclusive_Clean, E) {
+ ll_checkIncomingWriteback;
+ j_popIncomingUnblockQueue;
+ }
+
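+  // If the writeback raced with a forwarded request, the writer (cache
+  // state II) has already passed its data to the other requestor and
+  // sends only an Unblock; ownership has moved on, so fall back to NO.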
+ transition(WB, Unblock, NO) {
+ j_popIncomingUnblockQueue;
+ }
+}